Total coverage: 267056 of 1563885 lines (18%)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-buf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
#include <linux/mem_encrypt.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init_with_mnt - initialize an allocated shmem-backed GEM
 * object in a given shmfs mountpoint
 *
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 * @gemfs: tmpfs mount where the GEM object will be created. If NULL, use
 * the usual tmpfs mountpoint (`shm_mnt`).
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init_with_mnt(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size,
				 struct vfsmount *gemfs)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (gemfs)
		filp = shmem_file_setup_with_mnt(gemfs, "drm mm object", size,
						 VM_NORESERVE);
	else
		filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init_with_mnt);

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size)
{
	return drm_gem_object_init_with_mnt(dev, obj, size, NULL);
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	if (drm_core_check_feature(dev, DRIVER_GEM_GPUVA))
		drm_gem_gpuva_init(obj);

	drm_vma_node_reset(&obj->vma_node);
	INIT_LIST_HEAD(&obj->lru_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_private_object_fini - Finalize a failed drm_gem_object
 * @obj: drm_gem_object
 *
 * Uninitialize an already allocated GEM object whose initialization failed.
 */
void drm_gem_private_object_fini(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	dma_resv_fini(&obj->_resv);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
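
/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * driver-side allocation path built on drm_gem_object_init(). The names
 * foo_gem_object and foo_gem_create are hypothetical; real drivers embed
 * struct drm_gem_object in their own object type and set up
 * &drm_gem_object_funcs before publishing the object anywhere.
 */
#if 0	/* example only, never compiled */
struct foo_gem_object {
	struct drm_gem_object base;
};

static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
					     size_t size)
{
	struct foo_gem_object *fobj;
	int ret;

	size = PAGE_ALIGN(size);	/* drm_gem_object_init() requires it */

	fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
	if (!fobj)
		return ERR_PTR(-ENOMEM);

	/* Attaches a shmem backing store of @size bytes to the object. */
	ret = drm_gem_object_init(dev, &fobj->base, size);
	if (ret) {
		kfree(fobj);
		return ERR_PTR(ret);
	}

	return fobj;
}
#endif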

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_prime_remove_buf_handle(&file_priv->prime, id);
	drm_vma_node_revoke(&obj->vma_node, file_priv);
	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
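
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * driver's &drm_driver.dumb_create callback typically uses
 * drm_gem_handle_create(). foo_gem_create() is the hypothetical helper
 * sketched earlier. The handle is created last, once the object is fully
 * set up, and the local reference is dropped afterwards - from then on the
 * handle keeps the object alive.
 */
#if 0	/* example only, never compiled */
static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	struct foo_gem_object *fobj;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	fobj = foo_gem_create(dev, args->size);
	if (IS_ERR(fobj))
		return PTR_ERR(fobj);

	/* Publishes the object to userspace; must be the last step. */
	ret = drm_gem_handle_create(file_priv, &fobj->base, &args->handle);

	/* Drop the allocation reference; the handle holds its own. */
	drm_gem_object_put(&fobj->base);

	return ret;
}
#endif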

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move folios to appropriate lru and release the folios, decrementing the
 * ref count of those folios.
 */
static void drm_gem_check_release_batch(struct folio_batch *fbatch)
{
	check_move_unevictable_folios(fbatch);
	__folio_batch_release(fbatch);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page **pages;
	struct folio *folio;
	struct folio_batch fbatch;
	long i, j, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	i = 0;
	while (i < npages) {
		long nr;

		folio = shmem_read_folio_gfp(mapping, i,
					     mapping_gfp_mask(mapping));
		if (IS_ERR(folio))
			goto fail;
		nr = min(npages - i, folio_nr_pages(folio));
		for (j = 0; j < nr; j++, i++)
			pages[i] = folio_file_page(folio, i);

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (folio_pfn(folio) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	folio_batch_init(&fbatch);
	j = 0;
	while (j < i) {
		struct folio *f = page_folio(pages[j]);

		if (!folio_batch_add(&fbatch, f))
			drm_gem_check_release_batch(&fbatch);
		j += folio_nr_pages(f);
	}
	if (fbatch.nr)
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
	return ERR_CAST(folio);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct folio_batch fbatch;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	folio_batch_init(&fbatch);
	for (i = 0; i < npages; i++) {
		struct folio *folio;

		if (!pages[i])
			continue;
		folio = page_folio(pages[i]);

		if (dirty)
			folio_mark_dirty(folio);

		if (accessed)
			folio_mark_accessed(folio);

		/* Undo the reference we took when populating the table */
		if (!folio_batch_add(&fbatch, folio))
			drm_gem_check_release_batch(&fbatch);
		i += folio_nr_pages(folio) - 1;
	}
	if (folio_batch_count(&fbatch))
		drm_gem_check_release_batch(&fbatch);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
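
/*
 * Editor's illustrative sketch (not part of the original file): pairing
 * drm_gem_get_pages() with drm_gem_put_pages(), plus the one-time zone
 * constraint that the kernel-doc above says must be applied right after
 * drm_gem_object_init(). foo_pin_pages()/foo_unpin_pages() are hypothetical.
 */
#if 0	/* example only, never compiled */
static struct page **foo_pin_pages(struct drm_gem_object *obj)
{
	/*
	 * Optional, and only once, directly after drm_gem_object_init():
	 *
	 *	mapping_set_gfp_mask(obj->filp->f_mapping,
	 *			     GFP_USER | __GFP_DMA32);
	 */
	return drm_gem_get_pages(obj);	/* pins the whole object */
}

static void foo_unpin_pages(struct drm_gem_object *obj, struct page **pages)
{
	/* dirty=true, accessed=true: tell reclaim the pages were written. */
	drm_gem_put_pages(obj, pages, true, true);
}
#endif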

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 *
 * Returns:
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
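
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * lookup/use/put pattern in a driver ioctl handler. foo_some_args and
 * foo_do_something() are hypothetical stand-ins.
 */
#if 0	/* example only, never compiled */
static int foo_some_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct foo_some_args *args = data;	/* hypothetical ioctl args */
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;	/* no such handle on this file */

	ret = foo_do_something(obj);	/* hypothetical driver work */

	/* Drop the reference drm_gem_object_lookup() took. */
	drm_gem_object_put(obj);
	return ret;
}
#endif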

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);

	drm_gem_private_object_fini(obj);

	drm_gem_free_mmap_offset(obj);
	drm_gem_lru_remove(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
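
/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * vm_operations_struct wired to drm_gem_vm_open()/drm_gem_vm_close(), so
 * each VMA holds its own reference on the object. foo_gem_fault() is a
 * hypothetical fault handler.
 */
#if 0	/* example only, never compiled */
static vm_fault_t foo_gem_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vmf->vma->vm_private_data;

	/* Hypothetical: a real driver inserts the faulting page of @obj. */
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = foo_gem_fault,
	.open = drm_gem_vm_open,	/* grabs a ref on vma split/fork */
	.close = drm_gem_vm_close,	/* drops it on unmap */
};
#endif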

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
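
/*
 * Editor's illustrative sketch (not part of the original file): wiring the
 * drm_gem_mmap() helper (defined just below) into the device's
 * file_operations. DEFINE_DRM_GEM_FOPS() from <drm/drm_gem.h> does exactly
 * this, alongside the other standard DRM fops; the "foo" names are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
DEFINE_DRM_GEM_FOPS(foo_driver_fops);	/* sets .mmap = drm_gem_mmap, ... */

static const struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.fops = &foo_driver_fops,
	/* ... */
};
#endif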

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  str_yes_no(obj->import_attach));

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);

	return 0;
}

void drm_gem_unpin_locked(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_pin_locked(obj);
	dma_resv_unlock(obj->resv);

	return ret;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_unpin_locked(obj);
	dma_resv_unlock(obj->resv);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_assert_held(obj->resv);

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (iosys_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_assert_held(obj->resv);

	if (iosys_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	iosys_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);

void drm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);
}
EXPORT_SYMBOL(drm_gem_lock);

void drm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_unlock);

int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	int ret;

	dma_resv_lock(obj->resv, NULL);
	ret = drm_gem_vmap(obj, map);
	dma_resv_unlock(obj->resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);

void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
	dma_resv_lock(obj->resv, NULL);
	drm_gem_vunmap(obj, map);
	dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
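
/*
 * Editor's illustrative sketch (not part of the original file): CPU access
 * through the unlocked vmap helpers, which take the reservation lock
 * internally. foo_write_magic() and the value written are hypothetical.
 */
#if 0	/* example only, never compiled */
static int foo_write_magic(struct drm_gem_object *obj)
{
	struct iosys_map map;
	int ret;

	ret = drm_gem_vmap_unlocked(obj, &map);
	if (ret)
		return ret;

	/* iosys_map works for both I/O and system-memory mappings. */
	iosys_map_wr(&map, 0, u32, 0xdeadbeef);

	drm_gem_vunmap_unlocked(obj, &map);	/* also clears @map */
	return 0;
}
#endif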

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_fini(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
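
/*
 * Editor's illustrative sketch (not part of the original file): the
 * lock / submit / unlock pattern described in the kernel-doc above.
 * foo_submit() is a hypothetical stand-in for whatever queues the job and
 * publishes fences on the objects.
 */
#if 0	/* example only, never compiled */
static int foo_submit_job(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;	/* nothing is left locked on failure */

	ret = foo_submit(objs, count);	/* hypothetical: queue work, add fences */

	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}
#endif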

/**
 * drm_gem_lru_init - initialize a LRU
 *
 * @lru: The LRU to initialize
 * @lock: The lock protecting the LRU
 */
void
drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock)
{
	lru->lock = lock;
	lru->count = 0;
	INIT_LIST_HEAD(&lru->list);
}
EXPORT_SYMBOL(drm_gem_lru_init);

static void
drm_gem_lru_remove_locked(struct drm_gem_object *obj)
{
	obj->lru->count -= obj->size >> PAGE_SHIFT;
	WARN_ON(obj->lru->count < 0);
	list_del(&obj->lru_node);
	obj->lru = NULL;
}

/**
 * drm_gem_lru_remove - remove object from whatever LRU it is in
 *
 * If the object is currently in any LRU, remove it.
 *
 * @obj: The GEM object to remove from current LRU
 */
void
drm_gem_lru_remove(struct drm_gem_object *obj)
{
	struct drm_gem_lru *lru = obj->lru;

	if (!lru)
		return;

	mutex_lock(lru->lock);
	drm_gem_lru_remove_locked(obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_remove);

/**
 * drm_gem_lru_move_tail_locked - move the object to the tail of the LRU
 *
 * Like &drm_gem_lru_move_tail but lru lock must be held
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	lockdep_assert_held_once(lru->lock);

	if (obj->lru)
		drm_gem_lru_remove_locked(obj);

	lru->count += obj->size >> PAGE_SHIFT;
	list_add_tail(&obj->lru_node, &lru->list);
	obj->lru = lru;
}
EXPORT_SYMBOL(drm_gem_lru_move_tail_locked);

/**
 * drm_gem_lru_move_tail - move the object to the tail of the LRU
 *
 * If the object is already in this LRU it will be moved to the
 * tail.  Otherwise it will be removed from whichever other LRU
 * it is in (if any) and moved into this LRU.
 *
 * @lru: The LRU to move the object into.
 * @obj: The GEM object to move into this LRU
 */
void
drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj)
{
	mutex_lock(lru->lock);
	drm_gem_lru_move_tail_locked(lru, obj);
	mutex_unlock(lru->lock);
}
EXPORT_SYMBOL(drm_gem_lru_move_tail);

/**
 * drm_gem_lru_scan - helper to implement shrinker.scan_objects
 *
 * If the shrink callback succeeds, it is expected that the driver
 * move the object out of this LRU.
 *
 * If the LRU possibly contains active buffers, it is the responsibility
 * of the shrink callback to check for this (ie. dma_resv_test_signaled())
 * or if necessary block until the buffer becomes idle.
 *
 * @lru: The LRU to scan
 * @nr_to_scan: The number of pages to try to reclaim
 * @remaining: The number of pages left to reclaim, should be initialized by caller
 * @shrink: Callback to try to shrink/reclaim the object.
 */
unsigned long
drm_gem_lru_scan(struct drm_gem_lru *lru,
		 unsigned int nr_to_scan,
		 unsigned long *remaining,
		 bool (*shrink)(struct drm_gem_object *obj))
{
	struct drm_gem_lru still_in_lru;
	struct drm_gem_object *obj;
	unsigned freed = 0;

	drm_gem_lru_init(&still_in_lru, lru->lock);

	mutex_lock(lru->lock);

	while (freed < nr_to_scan) {
		obj = list_first_entry_or_null(&lru->list, typeof(*obj), lru_node);

		if (!obj)
			break;

		drm_gem_lru_move_tail_locked(&still_in_lru, obj);

		/*
		 * If it's in the process of being freed, gem_object->free()
		 * may be blocked on lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop the lock for the
		 * rest of the loop body, to reduce contention with other
		 * code paths that need the LRU lock
		 */
		mutex_unlock(lru->lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!dma_resv_trylock(obj->resv)) {
			*remaining += obj->size >> PAGE_SHIFT;
			goto tail;
		}

		if (shrink(obj)) {
			freed += obj->size >> PAGE_SHIFT;

			/*
			 * If we succeeded in releasing the object's backing
			 * pages, we expect the driver to have moved the object
			 * out of this LRU
			 */
			WARN_ON(obj->lru == &still_in_lru);
			WARN_ON(obj->lru == lru);
		}

		dma_resv_unlock(obj->resv);

tail:
		drm_gem_object_put(obj);
		mutex_lock(lru->lock);
	}

	/*
	 * Move objects we've skipped over out of the temporary still_in_lru
	 * back into this LRU
	 */
	list_for_each_entry (obj, &still_in_lru.list, lru_node)
		obj->lru = lru;
	list_splice_tail(&still_in_lru.list, &lru->list);
	lru->count += still_in_lru.count;

	mutex_unlock(lru->lock);

	return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);

/**
 * drm_gem_evict - helper to evict backing pages for a GEM object
 * @obj: obj in question
 */
int drm_gem_evict(struct drm_gem_object *obj)
{
	dma_resv_assert_held(obj->resv);

	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return -EBUSY;

	if (obj->funcs->evict)
		return obj->funcs->evict(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_evict);
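
/*
 * Editor's illustrative sketch (not part of the original file): a shrinker
 * scan_objects implementation built on drm_gem_lru_scan(). foo_device,
 * foo_purge() and the shrinker wiring are hypothetical; the shrink callback
 * runs with the object's reservation lock held and is expected to move the
 * object out of the scanned LRU on success.
 */
#if 0	/* example only, never compiled */
static bool foo_shrink(struct drm_gem_object *obj)
{
	if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
		return false;	/* still active, skip */

	return foo_purge(obj) == 0;	/* hypothetical: drops backing pages */
}

static unsigned long foo_shrinker_scan(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	struct foo_device *foo_dev = shrinker->private_data;
	unsigned long remaining = 0;
	unsigned long freed;

	freed = drm_gem_lru_scan(&foo_dev->lru, sc->nr_to_scan,
				 &remaining, foo_shrink);

	return freed ?: SHRINK_STOP;
}
#endif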
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB IBM C-It Video Camera driver
 *
 * Supports Xirlink C-It Video Camera, IBM PC Camera,
 * IBM NetCamera and Veo Stingray.
 *
 * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
 *
 * This driver is based on earlier work of:
 *
 * (C) Copyright 1999 Johannes Erdfelt
 * (C) Copyright 1999 Randy Dunlap
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "xirlink-cit"

#include <linux/input.h>
#include "gspca.h"

MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Xirlink C-IT");
MODULE_LICENSE("GPL");

/* FIXME we should autodetect this */
static int ibm_netcam_pro;
module_param(ibm_netcam_pro, int, 0);
MODULE_PARM_DESC(ibm_netcam_pro,
		 "Use IBM Netcamera Pro init sequences for Model 3 cams");

/* FIXME this should be handled through the V4L2 input selection API */
static int rca_input;
module_param(rca_input, int, 0644);
MODULE_PARM_DESC(rca_input,
		 "Use rca input instead of ccd sensor on Model 3 cams");

/* specific webcam descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct v4l2_ctrl *lighting;
	u8 model;
#define CIT_MODEL0 0 /* bcd version 0.01 cams ie the xvp-500 */
#define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */
#define CIT_MODEL2 2 /* ibmcam driver */
#define CIT_MODEL3 3
#define CIT_MODEL4 4
#define CIT_IBM_NETCAM_PRO 5
	u8 input_index;
	u8 button_state;
	u8 stop_on_control_change;
	u8 sof_read;
	u8 sof_len;
};

static void sd_stop0(struct gspca_dev *gspca_dev);

static const struct v4l2_pix_format cif_yuv_mode[] = {
	{176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 176,
		.sizeimage = 176 * 144 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{352, 288, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 352,
		.sizeimage = 352 * 288 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
};

static const struct v4l2_pix_format vga_yuv_mode[] = {
	{160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 160,
		.sizeimage = 160 * 120 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{640, 480, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage = 640 * 480 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
};

static const struct v4l2_pix_format model0_mode[] = {
	{160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 160,
		.sizeimage = 160 * 120 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 176,
		.sizeimage = 176 * 144 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{320, 240, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 320,
		.sizeimage = 320 * 240 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
};

static const struct v4l2_pix_format model2_mode[] = {
	{160, 120, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
		.bytesperline = 160,
		.sizeimage = 160 * 120 * 3 / 2 + 4,
		.colorspace = V4L2_COLORSPACE_SRGB},
	{176, 144,
V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE, .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 2 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, {352, 288, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 + 4, .colorspace = V4L2_COLORSPACE_SRGB}, }; /* * 01.01.08 - Added for RCA video in support -LO * This struct is used to init the Model3 cam to use the RCA video in port * instead of the CCD sensor. */ static const u16 rca_initdata[][3] = { {0, 0x0000, 0x010c}, {0, 0x0006, 0x012c}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {1, 0x0000, 0x0116}, {0, 0x0064, 0x0116}, {1, 0x0000, 0x0115}, {0, 0x0003, 0x0115}, {0, 0x0008, 0x0123}, {0, 0x0000, 0x0117}, {0, 0x0000, 0x0112}, {0, 0x0080, 0x0100}, {0, 0x0000, 0x0100}, {1, 0x0000, 0x0116}, {0, 0x0060, 0x0116}, {0, 0x0002, 0x0112}, {0, 0x0000, 0x0123}, {0, 0x0001, 0x0117}, {0, 0x0040, 0x0108}, {0, 0x0019, 0x012c}, {0, 0x0040, 0x0116}, {0, 0x000a, 0x0115}, {0, 0x000b, 0x0115}, {0, 0x0078, 0x012d}, {0, 0x0046, 0x012f}, {0, 0xd141, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfea8, 0x0124}, {0, 0x0064, 0x0116}, {0, 0x0000, 0x0115}, {0, 0x0001, 0x0115}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00aa, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f2, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f8, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00fc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f9, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x003c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xffff, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0027, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0019, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0021, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0006, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0045, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002a, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 
0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x000e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002b, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00f4, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002c, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0004, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002d, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002e, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x002f, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0003, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0014, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 
0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0053, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0000, 0x0101}, {0, 0x00a0, 0x0103}, {0, 0x0078, 0x0105}, {0, 0x0000, 0x010a}, {0, 0x0024, 0x010b}, {0, 0x0028, 0x0119}, {0, 0x0088, 0x011b}, {0, 0x0002, 0x011d}, {0, 0x0003, 0x011e}, {0, 0x0000, 0x0129}, {0, 0x00fc, 0x012b}, {0, 0x0008, 0x0102}, {0, 0x0000, 0x0104}, {0, 0x0008, 0x011a}, {0, 0x0028, 0x011c}, {0, 0x0021, 0x012a}, {0, 0x0000, 0x0118}, {0, 0x0000, 0x0132}, {0, 0x0000, 0x0109}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0031, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x00dc, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0032, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0020, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 
0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0001, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0040, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0037, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0030, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0xfff9, 0x0124}, {0, 0x0086, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0038, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0008, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0x0000, 0x0127}, {0, 0xfff8, 0x0124}, {0, 0xfffd, 0x0124}, {0, 0xfffa, 0x0124}, {0, 0x0003, 0x0111}, }; /* TESTME the old ibmcam driver repeats certain commands to Model1 cameras, we do the same for now (testing needed to see if this is really necessary) */ static const int cit_model1_ntries = 5; static const int cit_model1_ntries2 = 2; static int cit_write_reg(struct gspca_dev *gspca_dev, u16 value, u16 index) { struct usb_device *udev = gspca_dev->dev; int err; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x00, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, value, index, NULL, 0, 1000); if (err < 0) pr_err("Failed to write a register (index 0x%04X, value 0x%02X, error %d)\n", index, value, err); return 0; } static int cit_read_reg(struct gspca_dev *gspca_dev, u16 index, int verbose) { struct usb_device *udev = gspca_dev->dev; __u8 *buf = gspca_dev->usb_buf; int res; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x00, index, buf, 8, 1000); if (res < 0) { pr_err("Failed to read a register (index 0x%04X, error %d)\n", index, res); return res; } if (verbose) gspca_dbg(gspca_dev, D_PROBE, "Register %04x value: %02x\n", index, buf[0]); return 0; } /* * cit_send_FF_04_02() * * This procedure sends magic 3-command prefix to the camera. * The purpose of this prefix is not known. * * History: * 1/2/00 Created. 
*/ static void cit_send_FF_04_02(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x00FF, 0x0127); cit_write_reg(gspca_dev, 0x0004, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); } static void cit_send_00_04_06(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x0004, 0x0124); cit_write_reg(gspca_dev, 0x0006, 0x0124); } static void cit_send_x_00(struct gspca_dev *gspca_dev, unsigned short x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); } static void cit_send_x_00_05(struct gspca_dev *gspca_dev, unsigned short x) { cit_send_x_00(gspca_dev, x); cit_write_reg(gspca_dev, 0x0005, 0x0124); } static void cit_send_x_00_05_02(struct gspca_dev *gspca_dev, unsigned short x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); } static void cit_send_x_01_00_05(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0001, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); } static void cit_send_x_00_05_02_01(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0124); } static void cit_send_x_00_05_02_08_01(struct gspca_dev *gspca_dev, u16 x) { cit_write_reg(gspca_dev, x, 0x0127); cit_write_reg(gspca_dev, 0x0000, 0x0124); cit_write_reg(gspca_dev, 0x0005, 0x0124); cit_write_reg(gspca_dev, 0x0002, 0x0124); cit_write_reg(gspca_dev, 0x0008, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0124); } static void cit_Packet_Format1(struct gspca_dev *gspca_dev, u16 fkey, u16 val) { cit_send_x_01_00_05(gspca_dev, 0x0088); cit_send_x_00_05(gspca_dev, fkey); cit_send_x_00_05_02_08_01(gspca_dev, val); cit_send_x_00_05(gspca_dev, 0x0088); cit_send_x_00_05_02_01(gspca_dev, fkey); cit_send_x_00_05(gspca_dev, 0x0089); cit_send_x_00(gspca_dev, fkey); cit_send_00_04_06(gspca_dev); cit_read_reg(gspca_dev, 0x0126, 0); cit_send_FF_04_02(gspca_dev); } static void cit_PacketFormat2(struct gspca_dev *gspca_dev, u16 fkey, u16 val) { cit_send_x_01_00_05(gspca_dev, 0x0088); cit_send_x_00_05(gspca_dev, fkey); cit_send_x_00_05_02(gspca_dev, val); } static void cit_model2_Packet2(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x00ff, 0x012d); cit_write_reg(gspca_dev, 0xfea3, 0x0124); } static void cit_model2_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x012e); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xc719, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); cit_model2_Packet2(gspca_dev); } /* * cit_model3_Packet1() * * 00_0078_012d * 00_0097_012f * 00_d141_0124 * 00_0096_0127 * 00_fea8_0124 */ static void cit_model3_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); } static void cit_model4_Packet1(struct gspca_dev *gspca_dev, u16 v1, u16 v2) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, v1, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, v2, 0x0127); 
cit_write_reg(gspca_dev, 0xfea8, 0x0124); } static void cit_model4_BrightnessPacket(struct gspca_dev *gspca_dev, u16 val) { cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0026, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, val, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0038, 0x012d); cit_write_reg(gspca_dev, 0x0004, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->model = id->driver_info; if (sd->model == CIT_MODEL3 && ibm_netcam_pro) sd->model = CIT_IBM_NETCAM_PRO; cam = &gspca_dev->cam; switch (sd->model) { case CIT_MODEL0: cam->cam_mode = model0_mode; cam->nmodes = ARRAY_SIZE(model0_mode); sd->sof_len = 4; break; case CIT_MODEL1: cam->cam_mode = cif_yuv_mode; cam->nmodes = ARRAY_SIZE(cif_yuv_mode); sd->sof_len = 4; break; case CIT_MODEL2: cam->cam_mode = model2_mode + 1; /* no 160x120 */ cam->nmodes = 3; break; case CIT_MODEL3: cam->cam_mode = vga_yuv_mode; cam->nmodes = ARRAY_SIZE(vga_yuv_mode); sd->stop_on_control_change = 1; sd->sof_len = 4; break; case CIT_MODEL4: cam->cam_mode = model2_mode; cam->nmodes = ARRAY_SIZE(model2_mode); break; case CIT_IBM_NETCAM_PRO: cam->cam_mode = vga_yuv_mode; cam->nmodes = 2; /* no 640 x 480 */ cam->input_flags = V4L2_IN_ST_VFLIP; sd->stop_on_control_change = 1; sd->sof_len = 4; break; } return 0; } static int cit_init_model0(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */ cit_write_reg(gspca_dev, 0x0001, 0x0112); /* turn on autogain ? 
*/ cit_write_reg(gspca_dev, 0x0000, 0x0400); cit_write_reg(gspca_dev, 0x0001, 0x0400); cit_write_reg(gspca_dev, 0x0000, 0x0420); cit_write_reg(gspca_dev, 0x0001, 0x0420); cit_write_reg(gspca_dev, 0x000d, 0x0409); cit_write_reg(gspca_dev, 0x0002, 0x040a); cit_write_reg(gspca_dev, 0x0018, 0x0405); cit_write_reg(gspca_dev, 0x0008, 0x0435); cit_write_reg(gspca_dev, 0x0026, 0x040b); cit_write_reg(gspca_dev, 0x0007, 0x0437); cit_write_reg(gspca_dev, 0x0015, 0x042f); cit_write_reg(gspca_dev, 0x002b, 0x0439); cit_write_reg(gspca_dev, 0x0026, 0x043a); cit_write_reg(gspca_dev, 0x0008, 0x0438); cit_write_reg(gspca_dev, 0x001e, 0x042b); cit_write_reg(gspca_dev, 0x0041, 0x042c); return 0; } static int cit_init_ibm_netcam_pro(struct gspca_dev *gspca_dev) { cit_read_reg(gspca_dev, 0x128, 1); cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x0000, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0115); cit_write_reg(gspca_dev, 0x000b, 0x0115); cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0079, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xcd41, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_read_reg(gspca_dev, 0x0126, 1); cit_model3_Packet1(gspca_dev, 0x0000, 0x0000); cit_model3_Packet1(gspca_dev, 0x0000, 0x0001); cit_model3_Packet1(gspca_dev, 0x000b, 0x0000); cit_model3_Packet1(gspca_dev, 0x000c, 0x0008); cit_model3_Packet1(gspca_dev, 0x000d, 0x003a); cit_model3_Packet1(gspca_dev, 0x000e, 0x0060); cit_model3_Packet1(gspca_dev, 0x000f, 0x0060); cit_model3_Packet1(gspca_dev, 0x0010, 0x0008); cit_model3_Packet1(gspca_dev, 0x0011, 0x0004); cit_model3_Packet1(gspca_dev, 0x0012, 0x0028); cit_model3_Packet1(gspca_dev, 0x0013, 0x0002); cit_model3_Packet1(gspca_dev, 0x0014, 0x0000); cit_model3_Packet1(gspca_dev, 0x0015, 0x00fb); cit_model3_Packet1(gspca_dev, 0x0016, 0x0002); cit_model3_Packet1(gspca_dev, 0x0017, 0x0037); cit_model3_Packet1(gspca_dev, 0x0018, 0x0036); cit_model3_Packet1(gspca_dev, 0x001e, 0x0000); cit_model3_Packet1(gspca_dev, 0x001f, 0x0008); cit_model3_Packet1(gspca_dev, 0x0020, 0x00c1); cit_model3_Packet1(gspca_dev, 0x0021, 0x0034); cit_model3_Packet1(gspca_dev, 0x0022, 0x0034); cit_model3_Packet1(gspca_dev, 0x0025, 0x0002); cit_model3_Packet1(gspca_dev, 0x0028, 0x0022); cit_model3_Packet1(gspca_dev, 0x0029, 0x000a); cit_model3_Packet1(gspca_dev, 0x002b, 0x0000); cit_model3_Packet1(gspca_dev, 0x002c, 0x0000); cit_model3_Packet1(gspca_dev, 0x002d, 0x00ff); cit_model3_Packet1(gspca_dev, 0x002e, 0x00ff); cit_model3_Packet1(gspca_dev, 0x002f, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0030, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0031, 0x00ff); cit_model3_Packet1(gspca_dev, 0x0032, 0x0007); cit_model3_Packet1(gspca_dev, 0x0033, 0x0005); cit_model3_Packet1(gspca_dev, 0x0037, 0x0040); cit_model3_Packet1(gspca_dev, 0x0039, 0x0000); cit_model3_Packet1(gspca_dev, 0x003a, 0x0000); cit_model3_Packet1(gspca_dev, 0x003b, 0x0001); cit_model3_Packet1(gspca_dev, 0x003c, 0x0000); 
cit_model3_Packet1(gspca_dev, 0x0040, 0x000c); cit_model3_Packet1(gspca_dev, 0x0041, 0x00fb); cit_model3_Packet1(gspca_dev, 0x0042, 0x0002); cit_model3_Packet1(gspca_dev, 0x0043, 0x0000); cit_model3_Packet1(gspca_dev, 0x0045, 0x0000); cit_model3_Packet1(gspca_dev, 0x0046, 0x0000); cit_model3_Packet1(gspca_dev, 0x0047, 0x0000); cit_model3_Packet1(gspca_dev, 0x0048, 0x0000); cit_model3_Packet1(gspca_dev, 0x0049, 0x0000); cit_model3_Packet1(gspca_dev, 0x004a, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004b, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004c, 0x00ff); cit_model3_Packet1(gspca_dev, 0x004f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0050, 0x0000); cit_model3_Packet1(gspca_dev, 0x0051, 0x0002); cit_model3_Packet1(gspca_dev, 0x0055, 0x0000); cit_model3_Packet1(gspca_dev, 0x0056, 0x0000); cit_model3_Packet1(gspca_dev, 0x0057, 0x0000); cit_model3_Packet1(gspca_dev, 0x0058, 0x0002); cit_model3_Packet1(gspca_dev, 0x0059, 0x0000); cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); cit_model3_Packet1(gspca_dev, 0x005d, 0x0022); cit_model3_Packet1(gspca_dev, 0x005e, 0x003c); cit_model3_Packet1(gspca_dev, 0x005f, 0x0050); cit_model3_Packet1(gspca_dev, 0x0060, 0x0044); cit_model3_Packet1(gspca_dev, 0x0061, 0x0005); cit_model3_Packet1(gspca_dev, 0x006a, 0x007e); cit_model3_Packet1(gspca_dev, 0x006f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0072, 0x001b); cit_model3_Packet1(gspca_dev, 0x0073, 0x0005); cit_model3_Packet1(gspca_dev, 0x0074, 0x000a); cit_model3_Packet1(gspca_dev, 0x0075, 0x001b); cit_model3_Packet1(gspca_dev, 0x0076, 0x002a); cit_model3_Packet1(gspca_dev, 0x0077, 0x003c); cit_model3_Packet1(gspca_dev, 0x0078, 0x0050); cit_model3_Packet1(gspca_dev, 0x007b, 0x0000); cit_model3_Packet1(gspca_dev, 0x007c, 0x0011); cit_model3_Packet1(gspca_dev, 0x007d, 0x0024); cit_model3_Packet1(gspca_dev, 0x007e, 0x0043); cit_model3_Packet1(gspca_dev, 0x007f, 0x005a); cit_model3_Packet1(gspca_dev, 0x0084, 0x0020); cit_model3_Packet1(gspca_dev, 0x0085, 0x0033); cit_model3_Packet1(gspca_dev, 0x0086, 0x000a); cit_model3_Packet1(gspca_dev, 0x0087, 0x0030); cit_model3_Packet1(gspca_dev, 0x0088, 0x0070); cit_model3_Packet1(gspca_dev, 0x008b, 0x0008); cit_model3_Packet1(gspca_dev, 0x008f, 0x0000); cit_model3_Packet1(gspca_dev, 0x0090, 0x0006); cit_model3_Packet1(gspca_dev, 0x0091, 0x0028); cit_model3_Packet1(gspca_dev, 0x0092, 0x005a); cit_model3_Packet1(gspca_dev, 0x0093, 0x0082); cit_model3_Packet1(gspca_dev, 0x0096, 0x0014); cit_model3_Packet1(gspca_dev, 0x0097, 0x0020); cit_model3_Packet1(gspca_dev, 0x0098, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b0, 0x0046); cit_model3_Packet1(gspca_dev, 0x00b1, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b2, 0x0000); cit_model3_Packet1(gspca_dev, 0x00b3, 0x0004); cit_model3_Packet1(gspca_dev, 0x00b4, 0x0007); cit_model3_Packet1(gspca_dev, 0x00b6, 0x0002); cit_model3_Packet1(gspca_dev, 0x00b7, 0x0004); cit_model3_Packet1(gspca_dev, 0x00bb, 0x0000); cit_model3_Packet1(gspca_dev, 0x00bc, 0x0001); cit_model3_Packet1(gspca_dev, 0x00bd, 0x0000); cit_model3_Packet1(gspca_dev, 0x00bf, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c0, 0x00c8); cit_model3_Packet1(gspca_dev, 0x00c1, 0x0014); cit_model3_Packet1(gspca_dev, 0x00c2, 0x0001); cit_model3_Packet1(gspca_dev, 0x00c3, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c4, 0x0004); cit_model3_Packet1(gspca_dev, 0x00cb, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00cc, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00cd, 0x00bf); cit_model3_Packet1(gspca_dev, 0x00ce, 0x0000); cit_model3_Packet1(gspca_dev, 0x00cf, 0x0020); cit_model3_Packet1(gspca_dev, 
0x00d0, 0x0040);
	cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d1, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d2, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00d3, 0x00bf);
	cit_model3_Packet1(gspca_dev, 0x00ea, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x00eb, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00ec, 0x00e8);
	cit_model3_Packet1(gspca_dev, 0x00ed, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00ef, 0x0022);
	cit_model3_Packet1(gspca_dev, 0x00f0, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00f2, 0x0028);
	cit_model3_Packet1(gspca_dev, 0x00f4, 0x0002);
	cit_model3_Packet1(gspca_dev, 0x00f5, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fa, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fb, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00fc, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fd, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00fe, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00ff, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00be, 0x0003);
	cit_model3_Packet1(gspca_dev, 0x00c8, 0x0000);
	cit_model3_Packet1(gspca_dev, 0x00c9, 0x0020);
	cit_model3_Packet1(gspca_dev, 0x00ca, 0x0040);
	cit_model3_Packet1(gspca_dev, 0x0053, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x0082, 0x000e);
	cit_model3_Packet1(gspca_dev, 0x0083, 0x0020);
	cit_model3_Packet1(gspca_dev, 0x0034, 0x003c);
	cit_model3_Packet1(gspca_dev, 0x006e, 0x0055);
	cit_model3_Packet1(gspca_dev, 0x0062, 0x0005);
	cit_model3_Packet1(gspca_dev, 0x0063, 0x0008);
	cit_model3_Packet1(gspca_dev, 0x0066, 0x000a);
	cit_model3_Packet1(gspca_dev, 0x0067, 0x0006);
	cit_model3_Packet1(gspca_dev, 0x006b, 0x0010);
	cit_model3_Packet1(gspca_dev, 0x005a, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x005b, 0x000a);
	cit_model3_Packet1(gspca_dev, 0x0023, 0x0006);
	cit_model3_Packet1(gspca_dev, 0x0026, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x0036, 0x0069);
	cit_model3_Packet1(gspca_dev, 0x0038, 0x0064);
	cit_model3_Packet1(gspca_dev, 0x003d, 0x0003);
	cit_model3_Packet1(gspca_dev, 0x003e, 0x0001);
	cit_model3_Packet1(gspca_dev, 0x00b8, 0x0014);
	cit_model3_Packet1(gspca_dev, 0x00b9, 0x0014);
	cit_model3_Packet1(gspca_dev, 0x00e6, 0x0004);
	cit_model3_Packet1(gspca_dev, 0x00e8, 0x0001);
	return 0;
}

/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
		cit_init_model0(gspca_dev);
		sd_stop0(gspca_dev);
		break;
	case CIT_MODEL1:
	case CIT_MODEL2:
	case CIT_MODEL3:
	case CIT_MODEL4:
		break; /* All is done in sd_start */
	case CIT_IBM_NETCAM_PRO:
		cit_init_ibm_netcam_pro(gspca_dev);
		sd_stop0(gspca_dev);
		break;
	}
	return 0;
}

static int cit_set_brightness(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_IBM_NETCAM_PRO:
		/* No (known) brightness control for these */
		break;
	case CIT_MODEL1:
		/* Model 1: Brightness range 0 - 63 */
		cit_Packet_Format1(gspca_dev, 0x0031, val);
		cit_Packet_Format1(gspca_dev, 0x0032, val);
		cit_Packet_Format1(gspca_dev, 0x0033, val);
		break;
	case CIT_MODEL2:
		/* Model 2: Brightness range 0x60 - 0xee */
		/* Scale 0 - 63 to 0x60 - 0xee */
		i = 0x60 + val * 2254 / 1000;
		cit_model2_Packet1(gspca_dev, 0x001a, i);
		break;
	case CIT_MODEL3:
		/* Model 3: Brightness range 'i' in [0x0C..0x3F] */
		i = val;
		if (i < 0x0c)
			i = 0x0c;
		cit_model3_Packet1(gspca_dev, 0x0036, i);
		break;
	case CIT_MODEL4:
		/* Model 4: Brightness range 'i' in [0x04..0xb4] */
		/* Scale 0 - 63 to 0x04 - 0xb4 */
		i = 0x04 + val * 2794 / 1000;
		cit_model4_BrightnessPacket(gspca_dev, i);
		break;
	}

	return 0;
}
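
/*
 * A quick sanity check of the integer scaling in cit_set_brightness()
 * above, assuming the brightness control spans 0..63 as the comments
 * state: Model 2 maps 63 -> 0x60 + 63 * 2254 / 1000 = 0x60 + 142 = 0xee,
 * and Model 4 maps 63 -> 0x04 + 63 * 2794 / 1000 = 0x04 + 176 = 0xb4,
 * i.e. both scale factors hit the documented upper bounds exactly.
 */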
static int cit_set_contrast(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0: {
		int i;
		/* gain 0-15, 0-20 -> 0-15 */
		i = val * 1000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0422);
		/* gain 0-31, may not be lower than 0x0422, 0-20 -> 0-31 */
		i = val * 2000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0423);
		/* gain 0-127, may not be lower than 0x0423, 0-20 -> 0-63 */
		i = val * 4000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0424);
		/* gain 0-127, may not be lower than 0x0424, 0-20 -> 0-127 */
		i = val * 8000 / 1333;
		cit_write_reg(gspca_dev, i, 0x0425);
		break;
	}
	case CIT_MODEL2:
	case CIT_MODEL4:
		/* These models do not have this control. */
		break;
	case CIT_MODEL1: {
		/* Scale 0 - 20 to 15 - 0 */
		int i, new_contrast = (20 - val) * 1000 / 1333;

		for (i = 0; i < cit_model1_ntries; i++) {
			cit_Packet_Format1(gspca_dev, 0x0014, new_contrast);
			cit_send_FF_04_02(gspca_dev);
		}
		break;
	}
	case CIT_MODEL3: {
		/* Preset hardware values */
		static const struct {
			unsigned short cv1;
			unsigned short cv2;
			unsigned short cv3;
		} cv[7] = {
			{ 0x05, 0x05, 0x0f },	/* Minimum */
			{ 0x04, 0x04, 0x16 },
			{ 0x02, 0x03, 0x16 },
			{ 0x02, 0x08, 0x16 },
			{ 0x01, 0x0c, 0x16 },
			{ 0x01, 0x0e, 0x16 },
			{ 0x01, 0x10, 0x16 }	/* Maximum */
		};
		int i = val / 3;

		cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1);
		cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2);
		cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3);
		break;
	}
	case CIT_IBM_NETCAM_PRO:
		cit_model3_Packet1(gspca_dev, 0x005b, val + 1);
		break;
	}
	return 0;
}

static int cit_set_hue(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	switch (sd->model) {
	case CIT_MODEL0:
	case CIT_MODEL1:
	case CIT_IBM_NETCAM_PRO:
		/* No hue control for these models */
		break;
	case CIT_MODEL2:
		cit_model2_Packet1(gspca_dev, 0x0024, val);
		/* cit_model2_Packet1(gspca_dev, 0x0020, sat); */
		break;
	case CIT_MODEL3: {
		/* Model 3: Hue range 'i' in [0x05..0x37] */
		/* TESTME according to the ibmcam driver this does not work */
		if (0) {
			/* Scale 0 - 127 to 0x05 - 0x37 */
			int i = 0x05 + val * 1000 / 2540;

			cit_model3_Packet1(gspca_dev, 0x007e, i);
		}
		break;
	}
	case CIT_MODEL4:
		/*
		 * HDG: taken from ibmcam, setting the color gains does not
		 * really belong here.
		 *
		 * I am not sure r/g/b_gain variables exactly control gain
		 * of those channels. Most likely they subtly change some
		 * very internal image processing settings in the camera.
* In any case, here is what they do, and feel free to tweak: * * r_gain: seriously affects red gain * g_gain: seriously affects green gain * b_gain: seriously affects blue gain * hue: changes average color from violet (0) to red (0xFF) */ cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 160, 0x0127); /* Green gain */ cit_write_reg(gspca_dev, 160, 0x012e); /* Red gain */ cit_write_reg(gspca_dev, 160, 0x0130); /* Blue gain */ cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, val, 0x012d); /* Hue */ cit_write_reg(gspca_dev, 0xf545, 0x0124); break; } return 0; } static int cit_set_sharpness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL2: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: /* These models do not have this control */ break; case CIT_MODEL1: { int i; static const unsigned short sa[] = { 0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a }; for (i = 0; i < cit_model1_ntries; i++) cit_PacketFormat2(gspca_dev, 0x0013, sa[val]); break; } case CIT_MODEL3: { /* * "Use a table of magic numbers. * This setting doesn't really change much. * But that's how Windows does it." */ static const struct { unsigned short sv1; unsigned short sv2; unsigned short sv3; unsigned short sv4; } sv[7] = { { 0x00, 0x00, 0x05, 0x14 }, /* Smoothest */ { 0x01, 0x04, 0x05, 0x14 }, { 0x02, 0x04, 0x05, 0x14 }, { 0x03, 0x04, 0x05, 0x14 }, { 0x03, 0x05, 0x05, 0x14 }, { 0x03, 0x06, 0x05, 0x14 }, { 0x03, 0x07, 0x05, 0x14 } /* Sharpest */ }; cit_model3_Packet1(gspca_dev, 0x0060, sv[val].sv1); cit_model3_Packet1(gspca_dev, 0x0061, sv[val].sv2); cit_model3_Packet1(gspca_dev, 0x0062, sv[val].sv3); cit_model3_Packet1(gspca_dev, 0x0063, sv[val].sv4); break; } } return 0; } /* * cit_set_lighting() * * Camera model 1: * We have 3 levels of lighting conditions: 0=Bright, 1=Medium, 2=Low. * * Camera model 2: * We have 16 levels of lighting, 0 for bright light and up to 15 for * low light. But values above 5 or so are useless because camera is * not really capable to produce anything worth viewing at such light. * This setting may be altered only in certain camera state. * * Low lighting forces slower FPS. * * History: * 1/5/00 Created. * 2/20/00 Added support for Model 2 cameras. */ static void cit_set_lighting(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: break; case CIT_MODEL1: { int i; for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x0027, val); break; } } } static void cit_set_hflip(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: if (val) cit_write_reg(gspca_dev, 0x0020, 0x0115); else cit_write_reg(gspca_dev, 0x0040, 0x0115); break; case CIT_MODEL1: case CIT_MODEL2: case CIT_MODEL3: case CIT_MODEL4: case CIT_IBM_NETCAM_PRO: break; } } static int cit_restart_stream(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL1: cit_write_reg(gspca_dev, 0x0001, 0x0114); fallthrough; case CIT_MODEL2: case CIT_MODEL4: cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! 
 */
		usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
		break;
	case CIT_MODEL3:
	case CIT_IBM_NETCAM_PRO:
		cit_write_reg(gspca_dev, 0x0001, 0x0114);
		cit_write_reg(gspca_dev, 0x00c0, 0x010c); /* Go! */
		usb_clear_halt(gspca_dev->dev, gspca_dev->urb[0]->pipe);
		/* Clear button events from while we were not streaming */
		cit_write_reg(gspca_dev, 0x0001, 0x0113);
		break;
	}

	sd->sof_read = 0;

	return 0;
}

static int cit_get_packet_size(struct gspca_dev *gspca_dev)
{
	struct usb_host_interface *alt;
	struct usb_interface *intf;

	intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
	alt = usb_altnum_to_altsetting(intf, gspca_dev->alt);
	if (!alt) {
		pr_err("Couldn't get altsetting\n");
		return -EIO;
	}

	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;

	return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
}

/* Calculate the clockdiv giving us max fps given the available bandwidth */
static int cit_get_clock_div(struct gspca_dev *gspca_dev)
{
	int clock_div = 7; /* 0=30 1=25 2=20 3=15 4=12 5=7.5 6=6 7=3fps ?? */
	int fps[8] = { 30, 25, 20, 15, 12, 8, 6, 3 };
	int packet_size;

	packet_size = cit_get_packet_size(gspca_dev);
	if (packet_size < 0)
		return packet_size;

	while (clock_div > 3 &&
			1000 * packet_size >
			gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
			fps[clock_div - 1] * 3 / 2)
		clock_div--;

	gspca_dbg(gspca_dev, D_PROBE,
		  "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)\n",
		  packet_size, gspca_dev->pixfmt.width,
		  gspca_dev->pixfmt.height, clock_div, fps[clock_div]);

	return clock_div;
}
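
/*
 * Worked example of the bandwidth check in cit_get_clock_div() above,
 * assuming a (hypothetical) 1023 byte isochronous packet size at
 * 320x240: one YUV frame is 320 * 240 * 3 / 2 = 115200 bytes and the
 * budget is 1000 * 1023 = 1023000 bytes/s, which sustains 8 fps
 * (115200 * 8 = 921600) but not 12 fps (115200 * 12 = 1382400), so the
 * loop settles on clock_div = 5.
 */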
static int cit_start_model0(struct gspca_dev *gspca_dev)
{
	const unsigned short compression = 0; /* 0=none, 7=best frame rate */
	int clock_div;

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

	cit_write_reg(gspca_dev, 0x0000, 0x0100); /* turn on led */
	cit_write_reg(gspca_dev, 0x0003, 0x0438);
	cit_write_reg(gspca_dev, 0x001e, 0x042b);
	cit_write_reg(gspca_dev, 0x0041, 0x042c);
	cit_write_reg(gspca_dev, 0x0008, 0x0436);
	cit_write_reg(gspca_dev, 0x0024, 0x0403);
	cit_write_reg(gspca_dev, 0x002c, 0x0404);
	cit_write_reg(gspca_dev, 0x0002, 0x0426);
	cit_write_reg(gspca_dev, 0x0014, 0x0427);

	switch (gspca_dev->pixfmt.width) {
	case 160: /* 160x120 */
		cit_write_reg(gspca_dev, 0x0004, 0x010b);
		cit_write_reg(gspca_dev, 0x0001, 0x010a);
		cit_write_reg(gspca_dev, 0x0010, 0x0102);
		cit_write_reg(gspca_dev, 0x00a0, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0078, 0x0105);
		break;
	case 176: /* 176x144 */
		cit_write_reg(gspca_dev, 0x0006, 0x010b);
		cit_write_reg(gspca_dev, 0x0000, 0x010a);
		cit_write_reg(gspca_dev, 0x0005, 0x0102);
		cit_write_reg(gspca_dev, 0x00b0, 0x0103);
		cit_write_reg(gspca_dev, 0x0000, 0x0104);
		cit_write_reg(gspca_dev, 0x0090, 0x0105);
		break;
	case 320: /* 320x240 */
		cit_write_reg(gspca_dev, 0x0008, 0x010b);
		cit_write_reg(gspca_dev, 0x0004, 0x010a);
		cit_write_reg(gspca_dev, 0x0005, 0x0102);
		cit_write_reg(gspca_dev, 0x00a0, 0x0103);
		cit_write_reg(gspca_dev, 0x0010, 0x0104);
		cit_write_reg(gspca_dev, 0x0078, 0x0105);
		break;
	}

	cit_write_reg(gspca_dev, compression, 0x0109);
	cit_write_reg(gspca_dev, clock_div, 0x0111);

	return 0;
}

static int cit_start_model1(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i, clock_div;

	clock_div = cit_get_clock_div(gspca_dev);
	if (clock_div < 0)
		return clock_div;

	cit_read_reg(gspca_dev, 0x0128, 1);
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x01, 0x0100);	/* LED On  */
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x81, 0x0100);	/* LED Off */
	cit_read_reg(gspca_dev, 0x0100, 0);
	cit_write_reg(gspca_dev, 0x01, 0x0100);	/* LED On  */
	cit_write_reg(gspca_dev, 0x01, 0x0108);

	cit_write_reg(gspca_dev, 0x03, 0x0112);
	cit_read_reg(gspca_dev, 0x0115, 0);
	cit_write_reg(gspca_dev, 0x06, 0x0115);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x44, 0x0116);
	cit_read_reg(gspca_dev, 0x0116, 0);
	cit_write_reg(gspca_dev, 0x40, 0x0116);
	cit_read_reg(gspca_dev, 0x0115, 0);
	cit_write_reg(gspca_dev, 0x0e, 0x0115);
	cit_write_reg(gspca_dev, 0x19, 0x012c);

	cit_Packet_Format1(gspca_dev, 0x00, 0x1e);
	cit_Packet_Format1(gspca_dev, 0x39, 0x0d);
	cit_Packet_Format1(gspca_dev, 0x39, 0x09);
	cit_Packet_Format1(gspca_dev, 0x3b, 0x00);
	cit_Packet_Format1(gspca_dev, 0x28, 0x22);
	cit_Packet_Format1(gspca_dev, 0x27, 0x00);
	cit_Packet_Format1(gspca_dev, 0x2b, 0x1f);
	cit_Packet_Format1(gspca_dev, 0x39, 0x08);

	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x2c, 0x00);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x30, 0x14);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x01, 0xe1);
	cit_PacketFormat2(gspca_dev, 0x02, 0xcd);
	cit_PacketFormat2(gspca_dev, 0x03, 0xcd);
	cit_PacketFormat2(gspca_dev, 0x04, 0xfa);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x0a, 0x37);
	cit_PacketFormat2(gspca_dev, 0x0b, 0xb8);
	cit_PacketFormat2(gspca_dev, 0x0c, 0xf3);
	cit_PacketFormat2(gspca_dev, 0x0d, 0xe3);
	cit_PacketFormat2(gspca_dev, 0x0e, 0x0d);
	cit_PacketFormat2(gspca_dev, 0x0f, 0xf2);
	cit_PacketFormat2(gspca_dev, 0x10, 0xd5);
	cit_PacketFormat2(gspca_dev, 0x11, 0xba);
	cit_PacketFormat2(gspca_dev, 0x12, 0x53);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	cit_PacketFormat2(gspca_dev, 0x39, 0x02);
	cit_PacketFormat2(gspca_dev, 0x16, 0x00);
	cit_PacketFormat2(gspca_dev, 0x17, 0x28);
	cit_PacketFormat2(gspca_dev, 0x18, 0x7d);
	cit_PacketFormat2(gspca_dev, 0x19, 0xbe);
	cit_PacketFormat2(gspca_dev, 0x3f, 0xff);
	cit_PacketFormat2(gspca_dev, 0x39, 0x00);

	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x00, 0x18);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x13, 0x18);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x14, 0x06);

	/* TESTME These are handled through controls
	   KEEP until someone can test leaving this out is ok */
	if (0) {
		/* This is default brightness */
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x31, 0x37);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x32, 0x46);
		for (i = 0; i < cit_model1_ntries; i++)
			cit_Packet_Format1(gspca_dev, 0x33, 0x55);
	}

	cit_Packet_Format1(gspca_dev, 0x2e, 0x04);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x2d, 0x04);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x29, 0x80);
	cit_Packet_Format1(gspca_dev, 0x2c, 0x01);
	cit_Packet_Format1(gspca_dev, 0x30, 0x17);
	cit_Packet_Format1(gspca_dev, 0x39, 0x08);
	for (i = 0; i < cit_model1_ntries; i++)
		cit_Packet_Format1(gspca_dev, 0x34, 0x00);

	cit_write_reg(gspca_dev, 0x00, 0x0101);
	cit_write_reg(gspca_dev, 0x00, 0x010a);

	switch (gspca_dev->pixfmt.width) {
	case 128: /* 128x96 */
		cit_write_reg(gspca_dev, 0x80, 0x0103);
		cit_write_reg(gspca_dev, 0x60, 0x0105);
		cit_write_reg(gspca_dev, 0x0c, 0x010b);
		cit_write_reg(gspca_dev, 0x04, 0x011b);	/* Same everywhere */
		cit_write_reg(gspca_dev, 0x0b, 0x011d);
		cit_write_reg(gspca_dev, 0x00, 0x011e);
/* Same everywhere */ cit_write_reg(gspca_dev, 0x00, 0x0129); break; case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0xb0, 0x0103); cit_write_reg(gspca_dev, 0x8f, 0x0105); cit_write_reg(gspca_dev, 0x06, 0x010b); cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */ cit_write_reg(gspca_dev, 0x0d, 0x011d); cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */ cit_write_reg(gspca_dev, 0x03, 0x0129); break; case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0xb0, 0x0103); cit_write_reg(gspca_dev, 0x90, 0x0105); cit_write_reg(gspca_dev, 0x02, 0x010b); cit_write_reg(gspca_dev, 0x04, 0x011b); /* Same everywhere */ cit_write_reg(gspca_dev, 0x05, 0x011d); cit_write_reg(gspca_dev, 0x00, 0x011e); /* Same everywhere */ cit_write_reg(gspca_dev, 0x00, 0x0129); break; } cit_write_reg(gspca_dev, 0xff, 0x012b); /* TESTME These are handled through controls KEEP until someone can test leaving this out is ok */ if (0) { /* This is another brightness - don't know why */ for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x31, 0xc3); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x32, 0xd2); for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x33, 0xe1); /* Default contrast */ for (i = 0; i < cit_model1_ntries; i++) cit_Packet_Format1(gspca_dev, 0x14, 0x0a); /* Default sharpness */ for (i = 0; i < cit_model1_ntries2; i++) cit_PacketFormat2(gspca_dev, 0x13, 0x1a); /* Default lighting conditions */ cit_Packet_Format1(gspca_dev, 0x0027, v4l2_ctrl_g_ctrl(sd->lighting)); } /* Assorted init */ switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x36, 0x0102); cit_write_reg(gspca_dev, 0x1a, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2b, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; case 176: /* 176x144 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1e); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x04, 0x0102); cit_write_reg(gspca_dev, 0x02, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2b, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; case 352: /* 352x288 */ cit_Packet_Format1(gspca_dev, 0x2b, 0x1f); cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */ cit_write_reg(gspca_dev, 0x80, 0x0109); /* Same everywhere */ cit_write_reg(gspca_dev, 0x08, 0x0102); cit_write_reg(gspca_dev, 0x01, 0x0104); cit_write_reg(gspca_dev, 0x04, 0x011a); /* Same everywhere */ cit_write_reg(gspca_dev, 0x2f, 0x011c); cit_write_reg(gspca_dev, 0x23, 0x012a); /* Same everywhere */ break; } cit_write_reg(gspca_dev, 0x01, 0x0100); /* LED On */ cit_write_reg(gspca_dev, clock_div, 0x0111); return 0; } static int cit_start_model2(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int clock_div = 0; cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x00bc, 0x012c); cit_write_reg(gspca_dev, 0x0008, 0x012b); cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x0001, 0x0102); switch 
(gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */ cit_write_reg(gspca_dev, 0x00b9, 0x010a); /* Unique to this mode */ cit_write_reg(gspca_dev, 0x0038, 0x0119); /* Unique to this mode */ /* TESTME HDG: this does not seem right (it is 2 for all other resolutions) */ sd->sof_len = 10; break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0028, 0x0103); /* Unique to this mode */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; #if 0 case VIDEOSIZE_352x240: cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x001e, 0x0105); /* 320x240, 352x240 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; #endif case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x002c, 0x0103); /* All except 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0104); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x0105); /* 176x144, 352x288 */ cit_write_reg(gspca_dev, 0x0039, 0x010a); /* All except 176x144 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); /* All except 176x144 */ sd->sof_len = 2; break; } cit_write_reg(gspca_dev, 0x0000, 0x0100); /* LED on */ switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0050, 0x0111); cit_write_reg(gspca_dev, 0x00d0, 0x0111); break; case 320: /* 320x240 */ case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x0040, 0x0111); cit_write_reg(gspca_dev, 0x00c0, 0x0111); break; } cit_write_reg(gspca_dev, 0x009b, 0x010f); cit_write_reg(gspca_dev, 0x00bb, 0x010f); /* * Hardware settings, may affect CMOS sensor; not user controls! * ------------------------------------------------------------- * 0x0004: no effect * 0x0006: hardware effect * 0x0008: no effect * 0x000a: stops video stream, probably important h/w setting * 0x000c: changes color in hardware manner (not user setting) * 0x0012: changes number of colors (does not affect speed) * 0x002a: no effect * 0x002c: hardware setting (related to scan lines) * 0x002e: stops video stream, probably important h/w setting */ cit_model2_Packet1(gspca_dev, 0x000a, 0x005c); cit_model2_Packet1(gspca_dev, 0x0004, 0x0000); cit_model2_Packet1(gspca_dev, 0x0006, 0x00fb); cit_model2_Packet1(gspca_dev, 0x0008, 0x0000); cit_model2_Packet1(gspca_dev, 0x000c, 0x0009); cit_model2_Packet1(gspca_dev, 0x0012, 0x000a); cit_model2_Packet1(gspca_dev, 0x002a, 0x0000); cit_model2_Packet1(gspca_dev, 0x002c, 0x0000); cit_model2_Packet1(gspca_dev, 0x002e, 0x0008); /* * Function 0x0030 pops up all over the place. Apparently * it is a hardware control register, with every bit assigned to * do something. */ cit_model2_Packet1(gspca_dev, 0x0030, 0x0000); /* * Magic control of CMOS sensor. Only lower values like * 0-3 work, and picture shifts left or right. Don't change. 
 */
	switch (gspca_dev->pixfmt.width) {
	case 176: /* 176x144 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
		clock_div = 6;
		break;
	case 320: /* 320x240 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0009);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0005); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Another hardware setting */
		clock_div = 8;
		break;
#if 0
	case VIDEOSIZE_352x240:
		/* This mode doesn't work as Windows programs it; changed to work */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0009); /* Windows sets this to 8 */
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0003); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x0044); /* Windows sets this to 0x0045 */
		clock_div = 10;
		break;
#endif
	case 352: /* 352x288 */
		cit_model2_Packet1(gspca_dev, 0x0014, 0x0003);
		cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
		cit_model2_Packet1(gspca_dev, 0x0018, 0x004a); /* Another hardware setting */
		clock_div = 16;
		break;
	}

	/* TESTME These are handled through controls
	   KEEP until someone can test leaving this out is ok */
	if (0)
		cit_model2_Packet1(gspca_dev, 0x001a, 0x005a);

	/*
	 * We have our own frame rate setting varying from 0 (slowest) to 6
	 * (fastest). The camera model 2 allows frame rate in range [0..0x1F]
	 * where 0 is also the slowest setting. However for all practical
	 * reasons high settings make no sense because USB is not fast enough
	 * to support high FPS. Be aware that the picture datastream will be
	 * severely disrupted if you ask for frame rate faster than allowed
	 * for the video size - see below:
	 *
	 * Allowable ranges (obtained experimentally on OHCI, K6-3, 450 MHz):
	 * -----------------------------------------------------------------
	 * 176x144: [6..31]
	 * 320x240: [8..31]
	 * 352x240: [10..31]
	 * 352x288: [16..31] I have to raise lower threshold for stability...
	 *
	 * As usual, slower FPS provides better sensitivity.
	 */
	cit_model2_Packet1(gspca_dev, 0x001c, clock_div);
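
	/*
	 * Note that the clock_div values programmed above (176x144: 6,
	 * 320x240: 8, 352x288: 16) sit exactly at the lower bound of the
	 * allowable ranges listed above, i.e. presumably the fastest
	 * setting found to be stable for each resolution.
	 */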
*/ switch (gspca_dev->pixfmt.width) { case 176: /* 176x144 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2); break; case 320: /* 320x240 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x0044); break; #if 0 case VIDEOSIZE_352x240: cit_model2_Packet1(gspca_dev, 0x0026, 0x0046); break; #endif case 352: /* 352x288 */ cit_model2_Packet1(gspca_dev, 0x0026, 0x0048); break; } cit_model2_Packet1(gspca_dev, 0x0028, v4l2_ctrl_g_ctrl(sd->lighting)); /* model2 cannot change the backlight compensation while streaming */ v4l2_ctrl_grab(sd->lighting, true); /* color balance rg2 */ cit_model2_Packet1(gspca_dev, 0x001e, 0x002f); /* saturation */ cit_model2_Packet1(gspca_dev, 0x0020, 0x0034); /* color balance yb */ cit_model2_Packet1(gspca_dev, 0x0022, 0x00a0); /* Hardware control command */ cit_model2_Packet1(gspca_dev, 0x0030, 0x0004); return 0; } static int cit_start_model3(struct gspca_dev *gspca_dev) { const unsigned short compression = 0; /* 0=none, 7=best frame rate */ int i, clock_div = 0; /* HDG not in ibmcam driver, added to see if it helps with auto-detecting between model3 and ibm netcamera pro */ cit_read_reg(gspca_dev, 0x128, 1); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0112); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); cit_write_reg(gspca_dev, 0x0002, 0x0115); cit_write_reg(gspca_dev, 0x0003, 0x0115); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x000b, 0x0115); /* TESTME HDG not in ibmcam driver, added to see if it helps with auto-detecting between model3 and ibm netcamera pro */ if (0) { cit_write_reg(gspca_dev, 0x0078, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0079, 0x012d); cit_write_reg(gspca_dev, 0x00ff, 0x0130); cit_write_reg(gspca_dev, 0xcd41, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_read_reg(gspca_dev, 0x0126, 1); } cit_model3_Packet1(gspca_dev, 0x000a, 0x0040); cit_model3_Packet1(gspca_dev, 0x000b, 0x00f6); cit_model3_Packet1(gspca_dev, 0x000c, 0x0002); cit_model3_Packet1(gspca_dev, 0x000d, 0x0020); cit_model3_Packet1(gspca_dev, 0x000e, 0x0033); cit_model3_Packet1(gspca_dev, 0x000f, 0x0007); cit_model3_Packet1(gspca_dev, 0x0010, 0x0000); cit_model3_Packet1(gspca_dev, 0x0011, 0x0070); cit_model3_Packet1(gspca_dev, 0x0012, 0x0030); cit_model3_Packet1(gspca_dev, 0x0013, 0x0000); cit_model3_Packet1(gspca_dev, 0x0014, 0x0001); cit_model3_Packet1(gspca_dev, 0x0015, 0x0001); cit_model3_Packet1(gspca_dev, 0x0016, 0x0001); cit_model3_Packet1(gspca_dev, 0x0017, 0x0001); cit_model3_Packet1(gspca_dev, 0x0018, 0x0000); cit_model3_Packet1(gspca_dev, 0x001e, 0x00c3); cit_model3_Packet1(gspca_dev, 0x0020, 0x0000); cit_model3_Packet1(gspca_dev, 0x0028, 0x0010); cit_model3_Packet1(gspca_dev, 0x0029, 0x0054); cit_model3_Packet1(gspca_dev, 0x002a, 0x0013); cit_model3_Packet1(gspca_dev, 0x002b, 0x0007); cit_model3_Packet1(gspca_dev, 0x002d, 0x0028); cit_model3_Packet1(gspca_dev, 0x002e, 0x0000); cit_model3_Packet1(gspca_dev, 0x0031, 0x0000); cit_model3_Packet1(gspca_dev, 0x0032, 0x0000); cit_model3_Packet1(gspca_dev, 0x0033, 0x0000); cit_model3_Packet1(gspca_dev, 0x0034, 0x0000); cit_model3_Packet1(gspca_dev, 0x0035, 0x0038); cit_model3_Packet1(gspca_dev, 0x003a, 0x0001); cit_model3_Packet1(gspca_dev, 0x003c, 0x001e); 
cit_model3_Packet1(gspca_dev, 0x003f, 0x000a); cit_model3_Packet1(gspca_dev, 0x0041, 0x0000); cit_model3_Packet1(gspca_dev, 0x0046, 0x003f); cit_model3_Packet1(gspca_dev, 0x0047, 0x0000); cit_model3_Packet1(gspca_dev, 0x0050, 0x0005); cit_model3_Packet1(gspca_dev, 0x0052, 0x001a); cit_model3_Packet1(gspca_dev, 0x0053, 0x0003); cit_model3_Packet1(gspca_dev, 0x005a, 0x006b); cit_model3_Packet1(gspca_dev, 0x005d, 0x001e); cit_model3_Packet1(gspca_dev, 0x005e, 0x0030); cit_model3_Packet1(gspca_dev, 0x005f, 0x0041); cit_model3_Packet1(gspca_dev, 0x0064, 0x0008); cit_model3_Packet1(gspca_dev, 0x0065, 0x0015); cit_model3_Packet1(gspca_dev, 0x0068, 0x000f); cit_model3_Packet1(gspca_dev, 0x0079, 0x0000); cit_model3_Packet1(gspca_dev, 0x007a, 0x0000); cit_model3_Packet1(gspca_dev, 0x007c, 0x003f); cit_model3_Packet1(gspca_dev, 0x0082, 0x000f); cit_model3_Packet1(gspca_dev, 0x0085, 0x0000); cit_model3_Packet1(gspca_dev, 0x0099, 0x0000); cit_model3_Packet1(gspca_dev, 0x009b, 0x0023); cit_model3_Packet1(gspca_dev, 0x009c, 0x0022); cit_model3_Packet1(gspca_dev, 0x009d, 0x0096); cit_model3_Packet1(gspca_dev, 0x009e, 0x0096); cit_model3_Packet1(gspca_dev, 0x009f, 0x000a); switch (gspca_dev->pixfmt.width) { case 160: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0024, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00a9, 0x0119); cit_write_reg(gspca_dev, 0x0016, 0x011b); cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0018, 0x0102); cit_write_reg(gspca_dev, 0x0004, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x0028, 0x011c); cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); clock_div = 3; break; case 320: cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0028, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same */ cit_write_reg(gspca_dev, 0x0000, 0x011e); cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ /* 4 commands from 160x120 skipped */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x00d9, 0x0119); cit_write_reg(gspca_dev, 0x0006, 0x011b); cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0010, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x003f, 0x011c); cit_write_reg(gspca_dev, 0x001c, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); clock_div = 5; break; case 640: cit_write_reg(gspca_dev, 0x00f0, 0x0105); cit_write_reg(gspca_dev, 0x0000, 
0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0038, 0x010b); /* Differs everywhere */ cit_write_reg(gspca_dev, 0x00d9, 0x0119); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0006, 0x011b); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0004, 0x011d); /* NC */ cit_write_reg(gspca_dev, 0x0003, 0x011e); /* Same on 160x120, 640x480 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0021, 0x0102); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0016, 0x0104); /* NC */ cit_write_reg(gspca_dev, 0x0004, 0x011a); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x003f, 0x011c); /* Same on 320x240, 640x480 */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ cit_write_reg(gspca_dev, 0x001c, 0x0118); /* Same on 320x240, 640x480 */ cit_model3_Packet1(gspca_dev, 0x0021, 0x0001); /* Same */ cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, 0x0040, 0x0101); cit_write_reg(gspca_dev, 0x0040, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0132); /* Same on 320x240, 640x480 */ clock_div = 7; break; } cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); /* Hue */ cit_model3_Packet1(gspca_dev, 0x0036, 0x0011); /* Brightness */ cit_model3_Packet1(gspca_dev, 0x0060, 0x0002); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0061, 0x0004); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0062, 0x0005); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0063, 0x0014); /* Sharpness */ cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0067, 0x0001); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005b, 0x000c); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x005c, 0x0016); /* Contrast */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); cit_model3_Packet1(gspca_dev, 0x002c, 0x0003); /* Was 1, broke 640x480 */ cit_model3_Packet1(gspca_dev, 0x002f, 0x002a); cit_model3_Packet1(gspca_dev, 0x0030, 0x0029); cit_model3_Packet1(gspca_dev, 0x0037, 0x0002); cit_model3_Packet1(gspca_dev, 0x0038, 0x0059); cit_model3_Packet1(gspca_dev, 0x003d, 0x002e); cit_model3_Packet1(gspca_dev, 0x003e, 0x0028); cit_model3_Packet1(gspca_dev, 0x0078, 0x0005); cit_model3_Packet1(gspca_dev, 0x007b, 0x0011); cit_model3_Packet1(gspca_dev, 0x007d, 0x004b); cit_model3_Packet1(gspca_dev, 0x007f, 0x0022); cit_model3_Packet1(gspca_dev, 0x0080, 0x000c); cit_model3_Packet1(gspca_dev, 0x0081, 0x000b); cit_model3_Packet1(gspca_dev, 0x0083, 0x00fd); cit_model3_Packet1(gspca_dev, 0x0086, 0x000b); cit_model3_Packet1(gspca_dev, 0x0087, 0x000b); cit_model3_Packet1(gspca_dev, 0x007e, 0x000e); cit_model3_Packet1(gspca_dev, 0x0096, 0x00a0); /* Red sharpness */ cit_model3_Packet1(gspca_dev, 0x0097, 0x0096); /* Blue sharpness */ cit_model3_Packet1(gspca_dev, 0x0098, 0x000b); /* FIXME we should probably use cit_get_clock_div() here (in combination with isoc negotiation using the programmable isoc size) like with the IBM netcam pro.
*/ cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */ switch (gspca_dev->pixfmt.width) { case 160: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x000a); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; case 320: cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */ cit_model3_Packet1(gspca_dev, 0x003b, 0x003c); /* Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000b); break; case 640: cit_model3_Packet1(gspca_dev, 0x001f, 0x0002); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0039, 0x003e); /* !Same */ cit_model3_Packet1(gspca_dev, 0x0040, 0x0008); cit_model3_Packet1(gspca_dev, 0x0051, 0x000a); break; } /* if (sd->input_index) { */ if (rca_input) { for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) { if (rca_initdata[i][0]) cit_read_reg(gspca_dev, rca_initdata[i][2], 0); else cit_write_reg(gspca_dev, rca_initdata[i][1], rca_initdata[i][2]); } } return 0; } static int cit_start_model4(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_write_reg(gspca_dev, 0x00c0, 0x0111); cit_write_reg(gspca_dev, 0x00bc, 0x012c); cit_write_reg(gspca_dev, 0x0080, 0x012b); cit_write_reg(gspca_dev, 0x0000, 0x0108); cit_write_reg(gspca_dev, 0x0001, 0x0133); cit_write_reg(gspca_dev, 0x009b, 0x010f); cit_write_reg(gspca_dev, 0x00bb, 0x010f); cit_model4_Packet1(gspca_dev, 0x0038, 0x0000); cit_model4_Packet1(gspca_dev, 0x000a, 0x005c); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0004, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00fb, 0x012e); cit_write_reg(gspca_dev, 0x0000, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x000c, 0x0127); cit_write_reg(gspca_dev, 0x0009, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0012, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0008, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x002a, 0x012d); cit_write_reg(gspca_dev, 0x0000, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_model4_Packet1(gspca_dev, 0x0034, 0x0000); switch (gspca_dev->pixfmt.width) { case 128: /* 128x96 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000a, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 
0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00eb, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0031, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x012d); cit_write_reg(gspca_dev, 0x0078, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0038, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x00b9, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000b, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00c7, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0025, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0048, 0x0127); cit_write_reg(gspca_dev, 0x0035, 0x012e); cit_write_reg(gspca_dev, 0x00d0, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0048, 0x012d); cit_write_reg(gspca_dev, 0x0090, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0127); 
cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 176: /* 176x144 */ cit_write_reg(gspca_dev, 0x0038, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x00b9, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x002c, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0024, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0007, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0001, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005e, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0049, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00c7, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0028, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x002a, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x012d); cit_write_reg(gspca_dev, 0x006d, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0001, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); /* TESTME HDG: this does not seem right (it is 2 for all other resolutions) */ sd->sof_len = 10; break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00d0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x0028, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x001e, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x000a, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005a, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0043, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); 
cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00eb, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0031, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0017, 0x012d); cit_write_reg(gspca_dev, 0x0078, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; case 352: /* 352x288 */ cit_write_reg(gspca_dev, 0x0070, 0x0119); cit_write_reg(gspca_dev, 0x00c0, 0x0111); cit_write_reg(gspca_dev, 0x0039, 0x010a); cit_write_reg(gspca_dev, 0x0001, 0x0102); cit_write_reg(gspca_dev, 0x002c, 0x0103); cit_write_reg(gspca_dev, 0x0000, 0x0104); cit_write_reg(gspca_dev, 0x0024, 0x0105); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0016, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0006, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0014, 0x012d); cit_write_reg(gspca_dev, 0x0002, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012e); cit_write_reg(gspca_dev, 0x001a, 0x0130); cit_write_reg(gspca_dev, 0x8a0a, 0x0124); cit_write_reg(gspca_dev, 0x005e, 0x012d); cit_write_reg(gspca_dev, 0x9545, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x0127); cit_write_reg(gspca_dev, 0x0018, 0x012e); cit_write_reg(gspca_dev, 0x0049, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012f); cit_write_reg(gspca_dev, 0xd055, 0x0124); cit_write_reg(gspca_dev, 0x001c, 0x0127); cit_write_reg(gspca_dev, 0x00cf, 0x012e); cit_write_reg(gspca_dev, 0xaa28, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x0032, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0x00aa, 0x0130); cit_write_reg(gspca_dev, 0x82a8, 0x0124); cit_write_reg(gspca_dev, 0x0036, 0x012d); cit_write_reg(gspca_dev, 0x0008, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0xfffa, 0x0124); cit_write_reg(gspca_dev, 0x00aa, 0x012d); cit_write_reg(gspca_dev, 0x001e, 0x012f); cit_write_reg(gspca_dev, 0xd141, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x0127); cit_write_reg(gspca_dev, 0x0013, 0x012e); cit_write_reg(gspca_dev, 0x0025, 0x0130); cit_write_reg(gspca_dev, 0x8a28, 0x0124); cit_write_reg(gspca_dev, 0x0010, 0x012d); cit_write_reg(gspca_dev, 0x0048, 0x012f); cit_write_reg(gspca_dev, 0xd145, 0x0124); cit_write_reg(gspca_dev, 0x0000, 0x0127); cit_write_reg(gspca_dev, 0xfea8, 0x0124); sd->sof_len = 2; break; } cit_model4_Packet1(gspca_dev, 0x0038, 0x0004); return 0; } static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev) { const unsigned short compression = 0; /* 0=none, 7=best frame rate */ int i, clock_div; clock_div = 
cit_get_clock_div(gspca_dev); if (clock_div < 0) return clock_div; cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0100); cit_write_reg(gspca_dev, 0x0060, 0x0116); /* cit_write_reg(gspca_dev, 0x0002, 0x0112); see sd_stop0 */ cit_write_reg(gspca_dev, 0x0000, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0123); cit_write_reg(gspca_dev, 0x0001, 0x0117); cit_write_reg(gspca_dev, 0x0040, 0x0108); cit_write_reg(gspca_dev, 0x0019, 0x012c); cit_write_reg(gspca_dev, 0x0060, 0x0116); /* cit_write_reg(gspca_dev, 0x000b, 0x0115); see sd_stop0 */ cit_model3_Packet1(gspca_dev, 0x0049, 0x0000); cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x003a, 0x0102); /* Hstart */ cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0078, 0x0105); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x010a); /* Same */ cit_write_reg(gspca_dev, 0x0002, 0x011d); /* Same on 160x120, 320x240 */ cit_write_reg(gspca_dev, 0x0000, 0x0129); /* Same */ cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */ cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */ switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ cit_write_reg(gspca_dev, 0x0024, 0x010b); cit_write_reg(gspca_dev, 0x0089, 0x0119); cit_write_reg(gspca_dev, 0x000a, 0x011b); cit_write_reg(gspca_dev, 0x0003, 0x011e); cit_write_reg(gspca_dev, 0x0007, 0x0104); cit_write_reg(gspca_dev, 0x0009, 0x011a); cit_write_reg(gspca_dev, 0x008b, 0x011c); cit_write_reg(gspca_dev, 0x0008, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); break; case 320: /* 320x240 */ cit_write_reg(gspca_dev, 0x0028, 0x010b); cit_write_reg(gspca_dev, 0x00d9, 0x0119); cit_write_reg(gspca_dev, 0x0006, 0x011b); cit_write_reg(gspca_dev, 0x0000, 0x011e); cit_write_reg(gspca_dev, 0x000e, 0x0104); cit_write_reg(gspca_dev, 0x0004, 0x011a); cit_write_reg(gspca_dev, 0x003f, 0x011c); cit_write_reg(gspca_dev, 0x000c, 0x0118); cit_write_reg(gspca_dev, 0x0000, 0x0132); break; } cit_model3_Packet1(gspca_dev, 0x0019, 0x0031); cit_model3_Packet1(gspca_dev, 0x001a, 0x0003); cit_model3_Packet1(gspca_dev, 0x001b, 0x0038); cit_model3_Packet1(gspca_dev, 0x001c, 0x0000); cit_model3_Packet1(gspca_dev, 0x0024, 0x0001); cit_model3_Packet1(gspca_dev, 0x0027, 0x0001); cit_model3_Packet1(gspca_dev, 0x002a, 0x0004); cit_model3_Packet1(gspca_dev, 0x0035, 0x000b); cit_model3_Packet1(gspca_dev, 0x003f, 0x0001); cit_model3_Packet1(gspca_dev, 0x0044, 0x0000); cit_model3_Packet1(gspca_dev, 0x0054, 0x0000); cit_model3_Packet1(gspca_dev, 0x00c4, 0x0000); cit_model3_Packet1(gspca_dev, 0x00e7, 0x0001); cit_model3_Packet1(gspca_dev, 0x00e9, 0x0001); cit_model3_Packet1(gspca_dev, 0x00ee, 0x0000); cit_model3_Packet1(gspca_dev, 0x00f3, 0x00c0); cit_write_reg(gspca_dev, compression, 0x0109); cit_write_reg(gspca_dev, clock_div, 0x0111); /* if (sd->input_index) { */ if (rca_input) { for (i = 0; i < ARRAY_SIZE(rca_initdata); i++) { if (rca_initdata[i][0]) cit_read_reg(gspca_dev, rca_initdata[i][2], 0); else cit_write_reg(gspca_dev, rca_initdata[i][1], rca_initdata[i][2]); } } return 0; } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int packet_size; packet_size = cit_get_packet_size(gspca_dev); if (packet_size < 0) return packet_size; switch (sd->model) { case CIT_MODEL0: cit_start_model0(gspca_dev); break; case CIT_MODEL1: cit_start_model1(gspca_dev); 
break; case CIT_MODEL2: cit_start_model2(gspca_dev); break; case CIT_MODEL3: cit_start_model3(gspca_dev); break; case CIT_MODEL4: cit_start_model4(gspca_dev); break; case CIT_IBM_NETCAM_PRO: cit_start_ibm_netcam_pro(gspca_dev); break; } /* Program max isoc packet size */ cit_write_reg(gspca_dev, packet_size >> 8, 0x0106); cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107); cit_restart_stream(gspca_dev); return 0; } static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct usb_interface_cache *intfc; struct usb_host_interface *alt; int max_packet_size; switch (gspca_dev->pixfmt.width) { case 160: max_packet_size = 450; break; case 176: max_packet_size = 600; break; default: max_packet_size = 1022; break; } intfc = gspca_dev->dev->actconfig->intf_cache[0]; if (intfc->num_altsetting < 2) return -ENODEV; alt = &intfc->altsetting[1]; if (alt->desc.bNumEndpoints < 1) return -ENODEV; /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size); return 0; } static int sd_isoc_nego(struct gspca_dev *gspca_dev) { int ret, packet_size, min_packet_size; struct usb_host_interface *alt; switch (gspca_dev->pixfmt.width) { case 160: min_packet_size = 200; break; case 176: min_packet_size = 266; break; default: min_packet_size = 400; break; } /* * Existence of altsetting and endpoint was verified in sd_isoc_init() */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); if (packet_size <= min_packet_size) return -EIO; packet_size -= 100; if (packet_size < min_packet_size) packet_size = min_packet_size; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); if (ret < 0) pr_err("set alt 1 err %d\n", ret); return ret; } static void sd_stopN(struct gspca_dev *gspca_dev) { cit_write_reg(gspca_dev, 0x0000, 0x010c); } static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!gspca_dev->present) return; switch (sd->model) { case CIT_MODEL0: /* HDG Windows does this, but it causes the cam's autogain to restart from a gain of 0, which does not look good when changing resolutions.
*/ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */ cit_write_reg(gspca_dev, 0x00c0, 0x0100); /* LED Off */ break; case CIT_MODEL1: cit_send_FF_04_02(gspca_dev); cit_read_reg(gspca_dev, 0x0100, 0); cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */ break; case CIT_MODEL2: v4l2_ctrl_grab(sd->lighting, false); fallthrough; case CIT_MODEL4: cit_model2_Packet1(gspca_dev, 0x0030, 0x0004); cit_write_reg(gspca_dev, 0x0080, 0x0100); /* LED Off */ cit_write_reg(gspca_dev, 0x0020, 0x0111); cit_write_reg(gspca_dev, 0x00a0, 0x0111); cit_model2_Packet1(gspca_dev, 0x0030, 0x0002); cit_write_reg(gspca_dev, 0x0020, 0x0111); cit_write_reg(gspca_dev, 0x0000, 0x0112); break; case CIT_MODEL3: cit_write_reg(gspca_dev, 0x0006, 0x012c); cit_model3_Packet1(gspca_dev, 0x0046, 0x0000); cit_read_reg(gspca_dev, 0x0116, 0); cit_write_reg(gspca_dev, 0x0064, 0x0116); cit_read_reg(gspca_dev, 0x0115, 0); cit_write_reg(gspca_dev, 0x0003, 0x0115); cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0000, 0x0112); cit_write_reg(gspca_dev, 0x0080, 0x0100); break; case CIT_IBM_NETCAM_PRO: cit_model3_Packet1(gspca_dev, 0x0049, 0x00ff); cit_write_reg(gspca_dev, 0x0006, 0x012c); cit_write_reg(gspca_dev, 0x0000, 0x0116); /* HDG Windows does this, but I cannot get the camera to restart with this without redoing the entire init sequence, which makes switching modes really slow */ /* cit_write_reg(gspca_dev, 0x0006, 0x0115); */ cit_write_reg(gspca_dev, 0x0008, 0x0123); cit_write_reg(gspca_dev, 0x0000, 0x0117); cit_write_reg(gspca_dev, 0x0003, 0x0133); cit_write_reg(gspca_dev, 0x0000, 0x0111); /* HDG Windows does this, but I get a green picture when restarting the stream after this */ /* cit_write_reg(gspca_dev, 0x0000, 0x0112); */ cit_write_reg(gspca_dev, 0x00c0, 0x0100); break; } #if IS_ENABLED(CONFIG_INPUT) /* If the last button state is pressed, release it now!
*/ if (sd->button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); sd->button_state = 0; } #endif } static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; u8 byte3 = 0, byte4 = 0; int i; switch (sd->model) { case CIT_MODEL0: case CIT_MODEL1: case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: switch (gspca_dev->pixfmt.width) { case 160: /* 160x120 */ byte3 = 0x02; byte4 = 0x0a; break; case 176: /* 176x144 */ byte3 = 0x02; byte4 = 0x0e; break; case 320: /* 320x240 */ byte3 = 0x02; byte4 = 0x08; break; case 352: /* 352x288 */ byte3 = 0x02; byte4 = 0x00; break; case 640: byte3 = 0x03; byte4 = 0x08; break; } /* These have a different byte3 */ if (sd->model <= CIT_MODEL1) byte3 = 0x00; for (i = 0; i < len; i++) { /* For this model the SOF always starts at offset 0 so no need to search the entire frame */ if (sd->model == CIT_MODEL0 && sd->sof_read != i) break; switch (sd->sof_read) { case 0: if (data[i] == 0x00) sd->sof_read++; break; case 1: if (data[i] == 0xff) sd->sof_read++; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; case 2: if (data[i] == byte3) sd->sof_read++; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; case 3: if (data[i] == byte4) { sd->sof_read = 0; return data + i + (sd->sof_len - 3); } if (byte3 == 0x00 && data[i] == 0xff) sd->sof_read = 2; else if (data[i] == 0x00) sd->sof_read = 1; else sd->sof_read = 0; break; } } break; case CIT_MODEL2: case CIT_MODEL4: /* TESTME we need to find a longer sof signature to avoid false positives */ for (i = 0; i < len; i++) { switch (sd->sof_read) { case 0: if (data[i] == 0x00) sd->sof_read++; break; case 1: sd->sof_read = 0; if (data[i] == 0xff) { if (i >= 4) gspca_dbg(gspca_dev, D_FRAM, "header found at offset: %d: %02x %02x 00 %3ph\n\n", i - 1, data[i - 4], data[i - 3], &data[i]); else gspca_dbg(gspca_dev, D_FRAM, "header found at offset: %d: 00 %3ph\n\n", i - 1, &data[i]); return data + i + (sd->sof_len - 1); } break; } } break; } return NULL; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; unsigned char *sof; sof = cit_find_sof(gspca_dev, data, len); if (sof) { int n; /* finish decoding current frame */ n = sof - data; if (n > sd->sof_len) n -= sd->sof_len; else n = 0; gspca_frame_add(gspca_dev, LAST_PACKET, data, n); gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); len -= sof - data; data = sof; } gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } #if IS_ENABLED(CONFIG_INPUT) static void cit_check_button(struct gspca_dev *gspca_dev) { int new_button_state; struct sd *sd = (struct sd *)gspca_dev; switch (sd->model) { case CIT_MODEL3: case CIT_IBM_NETCAM_PRO: break; default: /* TEST ME unknown if this works on other models too */ return; } /* Read the button state */ cit_read_reg(gspca_dev, 0x0113, 0); new_button_state = !gspca_dev->usb_buf[0]; /* Tell the cam we've seen the button press, notice that this is a nop (iow the cam keeps reporting pressed) until the button is actually released. 
*/ if (new_button_state) cit_write_reg(gspca_dev, 0x01, 0x0113); if (sd->button_state != new_button_state) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, new_button_state); input_sync(gspca_dev->input_dev); sd->button_state = new_button_state; } } #endif static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; if (sd->stop_on_control_change) sd_stopN(gspca_dev); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: cit_set_brightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: cit_set_contrast(gspca_dev, ctrl->val); break; case V4L2_CID_HUE: cit_set_hue(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: cit_set_hflip(gspca_dev, ctrl->val); break; case V4L2_CID_SHARPNESS: cit_set_sharpness(gspca_dev, ctrl->val); break; case V4L2_CID_BACKLIGHT_COMPENSATION: cit_set_lighting(gspca_dev, ctrl->val); break; } if (sd->stop_on_control_change) cit_restart_stream(gspca_dev); return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; bool has_brightness; bool has_contrast; bool has_hue; bool has_sharpness; bool has_lighting; bool has_hflip; has_brightness = has_contrast = has_hue = has_sharpness = has_hflip = has_lighting = false; switch (sd->model) { case CIT_MODEL0: has_contrast = has_hflip = true; break; case CIT_MODEL1: has_brightness = has_contrast = has_sharpness = has_lighting = true; break; case CIT_MODEL2: has_brightness = has_hue = has_lighting = true; break; case CIT_MODEL3: has_brightness = has_contrast = has_sharpness = true; break; case CIT_MODEL4: has_brightness = has_hue = true; break; case CIT_IBM_NETCAM_PRO: has_brightness = has_hue = has_sharpness = has_hflip = has_lighting = true; break; } gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 5); if (has_brightness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 63, 1, 32); if (has_contrast) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 20, 1, 10); if (has_hue) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HUE, 0, 127, 1, 63); if (has_sharpness) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SHARPNESS, 0, 6, 1, 3); if (has_lighting) sd->lighting = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, 0, 2, 1, 1); if (has_hflip) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; static const struct sd_desc sd_desc_isoc_nego = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .isoc_init = sd_isoc_init, .isoc_nego = sd_isoc_nego, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, #if IS_ENABLED(CONFIG_INPUT) .dq_callback = cit_check_button, .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { { USB_DEVICE_VER(0x0545, 0x8080, 
0x0001, 0x0001), .driver_info = CIT_MODEL0 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { USB_DEVICE_VER(0x0545, 0x8080, 0x0301, 0x0301), .driver_info = CIT_MODEL3 }, { USB_DEVICE_VER(0x0545, 0x8002, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, { USB_DEVICE_VER(0x0545, 0x800c, 0x030a, 0x030a), .driver_info = CIT_MODEL2 }, { USB_DEVICE_VER(0x0545, 0x800d, 0x030a, 0x030a), .driver_info = CIT_MODEL4 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct sd_desc *desc = &sd_desc; switch (id->driver_info) { case CIT_MODEL0: case CIT_MODEL1: if (intf->cur_altsetting->desc.bInterfaceNumber != 2) return -ENODEV; break; case CIT_MODEL2: case CIT_MODEL4: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; break; case CIT_MODEL3: if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* FIXME this likely applies to all model3 cams and probably to other models too. */ if (ibm_netcam_pro) desc = &sd_desc_isoc_nego; break; } return gspca_dev_probe2(intf, id, desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
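/*
 * Editor's note: illustrative sketch, not part of the driver above.
 * sd_isoc_init() and sd_isoc_nego() implement a simple isoc bandwidth
 * back-off: streaming starts at a per-resolution maximum wMaxPacketSize
 * and, whenever the USB core rejects the bandwidth, the advertised size
 * shrinks by 100 bytes until the per-resolution minimum is reached, at
 * which point negotiation fails with -EIO. The helper below models just
 * that arithmetic; the name cit_nego_next_size() is hypothetical and a
 * kernel context is assumed for -EIO (<linux/errno.h>).
 */
static int cit_nego_next_size(int width, int cur_size)
{
	int min_size;

	switch (width) {
	case 160:
		min_size = 200;
		break;
	case 176:
		min_size = 266;
		break;
	default:
		min_size = 400;
		break;
	}

	if (cur_size <= min_size)
		return -EIO;	/* already at the floor, give up */

	cur_size -= 100;	/* concede 100 bytes of isoc bandwidth */
	if (cur_size < min_size)
		cur_size = min_size;

	return cur_size;	/* next wMaxPacketSize to advertise */
}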
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Instituto Nokia de Tecnologia * * Authors: * Aloisio Almeida Jr <aloisio.almeida@openbossa.org> * Lauro Ramos Venancio <lauro.venancio@openbossa.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ #include <net/tcp_states.h> #include <linux/nfc.h> #include <linux/export.h> #include <linux/kcov.h> #include "nfc.h" static struct nfc_sock_list raw_sk_list = { .lock = __RW_LOCK_UNLOCKED(raw_sk_list.lock) }; static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_add_node(sk, &l->head); write_unlock(&l->lock); } static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk) { write_lock(&l->lock); sk_del_node_init(sk); write_unlock(&l->lock); } static void rawsock_write_queue_purge(struct sock *sk) { pr_debug("sk=%p\n", sk); spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_purge(&sk->sk_write_queue); nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); } static void rawsock_report_error(struct sock *sk, int err) { pr_debug("sk=%p err=%d\n", sk, err); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_err = -err; sk_error_report(sk); rawsock_write_queue_purge(sk); } static int rawsock_release(struct socket *sock) { struct sock *sk = sock->sk; pr_debug("sock=%p sk=%p\n", sock, sk); if (!sk) return 0; if (sock->type == SOCK_RAW) nfc_sock_unlink(&raw_sk_list, sk); sock_orphan(sk); sock_put(sk); return 0; } static int rawsock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr; struct nfc_dev *dev; int rc = 0; pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx,
addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sock->state == SS_CONNECTED) { rc = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (!dev) { rc = -ENODEV; goto error; } if (addr->target_idx > dev->target_next_idx - 1 || addr->target_idx < dev->target_next_idx - dev->n_targets) { rc = -EINVAL; goto put_dev; } rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); if (rc) goto put_dev; nfc_rawsock(sk)->dev = dev; nfc_rawsock(sk)->target_idx = addr->target_idx; sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); release_sock(sk); return 0; put_dev: nfc_put_device(dev); error: release_sock(sk); return rc; } static int rawsock_add_header(struct sk_buff *skb) { *(u8 *)skb_push(skb, NFC_HEADER_SIZE) = 0; return 0; } static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb, int err) { struct sock *sk = (struct sock *) context; BUG_ON(in_hardirq()); pr_debug("sk=%p err=%d\n", sk, err); if (err) goto error; err = rawsock_add_header(skb); if (err) goto error_skb; err = sock_queue_rcv_skb(sk, skb); if (err) goto error_skb; spin_lock_bh(&sk->sk_write_queue.lock); if (!skb_queue_empty(&sk->sk_write_queue)) schedule_work(&nfc_rawsock(sk)->tx_work); else nfc_rawsock(sk)->tx_work_scheduled = false; spin_unlock_bh(&sk->sk_write_queue.lock); sock_put(sk); return; error_skb: kfree_skb(skb); error: rawsock_report_error(sk, err); sock_put(sk); } static void rawsock_tx_work(struct work_struct *work) { struct sock *sk = to_rawsock_sk(work); struct nfc_dev *dev = nfc_rawsock(sk)->dev; u32 target_idx = nfc_rawsock(sk)->target_idx; struct sk_buff *skb; int rc; pr_debug("sk=%p target_idx=%u\n", sk, target_idx); if (sk->sk_shutdown & SEND_SHUTDOWN) { rawsock_write_queue_purge(sk); return; } skb = skb_dequeue(&sk->sk_write_queue); kcov_remote_start_common(skb_get_kcov_handle(skb)); sock_hold(sk); rc = nfc_data_exchange(dev, target_idx, skb, rawsock_data_exchange_complete, sk); if (rc) { rawsock_report_error(sk, rc); sock_put(sk); } kcov_remote_stop(); } static int rawsock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_dev *dev = nfc_rawsock(sk)->dev; struct sk_buff *skb; int rc; pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len); if (msg->msg_namelen) return -EOPNOTSUPP; if (sock->state != SS_CONNECTED) return -ENOTCONN; skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc); if (skb == NULL) return rc; rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc < 0) { kfree_skb(skb); return rc; } spin_lock_bh(&sk->sk_write_queue.lock); __skb_queue_tail(&sk->sk_write_queue, skb); if (!nfc_rawsock(sk)->tx_work_scheduled) { schedule_work(&nfc_rawsock(sk)->tx_work); nfc_rawsock(sk)->tx_work_scheduled = true; } spin_unlock_bh(&sk->sk_write_queue.lock); return len; } static int rawsock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int rc; pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags); skb = skb_recv_datagram(sk, flags, &rc); if (!skb) return rc; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); return rc ? 
: copied; } static const struct proto_ops rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = rawsock_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops rawsock_raw_ops = { .family = PF_NFC, .owner = THIS_MODULE, .release = rawsock_release, .bind = sock_no_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, }; static void rawsock_destruct(struct sock *sk) { pr_debug("sk=%p\n", sk); if (sk->sk_state == TCP_ESTABLISHED) { nfc_deactivate_target(nfc_rawsock(sk)->dev, nfc_rawsock(sk)->target_idx, NFC_TARGET_MODE_IDLE); nfc_put_device(nfc_rawsock(sk)->dev); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC raw socket %p\n", sk); return; } } static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("sock=%p\n", sock); if ((sock->type != SOCK_SEQPACKET) && (sock->type != SOCK_RAW)) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; sock->ops = &rawsock_raw_ops; } else { sock->ops = &rawsock_ops; } sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; if (sock->type == SOCK_RAW) nfc_sock_link(&raw_sk_list, sk); else { INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; } return 0; } void nfc_send_to_raw_sock(struct nfc_dev *dev, struct sk_buff *skb, u8 payload_type, u8 direction) { struct sk_buff *skb_copy = NULL, *nskb; struct sock *sk; u8 *data; read_lock(&raw_sk_list.lock); sk_for_each(sk, &raw_sk_list.head) { if (!skb_copy) { skb_copy = __pskb_copy_fclone(skb, NFC_RAW_HEADER_SIZE, GFP_ATOMIC, true); if (!skb_copy) continue; data = skb_push(skb_copy, NFC_RAW_HEADER_SIZE); data[0] = dev ? dev->idx : 0xFF; data[1] = direction & 0x01; data[1] |= (payload_type << 1); } nskb = skb_clone(skb_copy, GFP_ATOMIC); if (!nskb) continue; if (sock_queue_rcv_skb(sk, nskb)) kfree_skb(nskb); } read_unlock(&raw_sk_list.lock); kfree_skb(skb_copy); } EXPORT_SYMBOL(nfc_send_to_raw_sock); static struct proto rawsock_proto = { .name = "NFC_RAW", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_rawsock), }; static const struct nfc_protocol rawsock_nfc_proto = { .id = NFC_SOCKPROTO_RAW, .proto = &rawsock_proto, .owner = THIS_MODULE, .create = rawsock_create }; int __init rawsock_init(void) { int rc; rc = nfc_proto_register(&rawsock_nfc_proto); return rc; } void rawsock_exit(void) { nfc_proto_unregister(&rawsock_nfc_proto); }
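/*
 * Editor's note: illustrative sketch, not part of rawsock.c above.
 * nfc_send_to_raw_sock() prefixes each skb queued to a SOCK_RAW
 * listener with NFC_RAW_HEADER_SIZE bytes: byte 0 carries the device
 * index (0xFF when no device is known) and byte 1 packs the direction
 * in bit 0 with the payload type in the remaining bits. The helper
 * below just unpacks that layout; the struct and function names are
 * hypothetical, and u8 assumes <linux/types.h>.
 */
struct nfc_raw_hdr_fields {	/* hypothetical, for illustration */
	u8 dev_idx;		/* 0xFF == no device */
	u8 direction;		/* bit 0 of the second byte */
	u8 payload_type;	/* bits 7:1 of the second byte */
};

static void nfc_raw_hdr_unpack(const u8 *hdr, struct nfc_raw_hdr_fields *f)
{
	f->dev_idx = hdr[0];
	f->direction = hdr[1] & 0x01;
	f->payload_type = hdr[1] >> 1;
}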
/* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #ifndef __DRM_FRAMEBUFFER_H__ #define __DRM_FRAMEBUFFER_H__ #include <linux/ctype.h> #include <linux/list.h> #include <linux/sched.h> #include <drm/drm_fourcc.h> #include <drm/drm_mode_object.h> struct drm_clip_rect; struct drm_device; struct drm_file; struct drm_framebuffer; struct drm_gem_object; /** * struct drm_framebuffer_funcs - framebuffer hooks */ struct drm_framebuffer_funcs { /** * @destroy: * * Clean up framebuffer resources, specifically also unreference the * backing storage. The core guarantees to call this function for every * framebuffer successfully created by calling * &drm_mode_config_funcs.fb_create. Drivers must also call * drm_framebuffer_cleanup() to release DRM core resources for this * framebuffer. */ void (*destroy)(struct drm_framebuffer *framebuffer); /** * @create_handle: * * Create a buffer handle in the driver-specific buffer manager (either * GEM or TTM) valid for the passed-in &struct drm_file. This is used by * the core to implement the GETFB IOCTL, which returns (for a * sufficiently privileged user) also a native buffer handle.
This can * be used for seamless transitions between modesetting clients by * copying the current screen contents to a private buffer and blending * between that and the new contents. * * GEM-based drivers should call drm_gem_handle_create() to create the * handle. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*create_handle)(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned int *handle); /** * @dirty: * * Optional callback for the dirty fb IOCTL. * * Userspace can notify the driver via this callback that an area of the * framebuffer has changed and should be flushed to the display * hardware. This can also be used internally, e.g. by the fbdev * emulation, though that's not the case currently. * * See documentation in drm_mode.h for the struct drm_mode_fb_dirty_cmd * for more information as all the semantics and arguments have a one-to-one * mapping onto this function. * * Atomic drivers should use drm_atomic_helper_dirtyfb() to implement * this hook. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*dirty)(struct drm_framebuffer *framebuffer, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips); }; /** * struct drm_framebuffer - frame buffer object * * Note that the fb is refcounted for the benefit of driver internals; * for example, on some hw disabling a CRTC/plane is asynchronous, and * scanout does not actually complete until the next vblank. So some * cleanup (like releasing the reference(s) on the backing GEM bo(s)) * should be deferred. In cases like this, the driver would like to * hold a ref to the fb even though it has already been removed from the * userspace perspective. See drm_framebuffer_get() and * drm_framebuffer_put(). * * The refcount is stored inside the mode object @base. */ struct drm_framebuffer { /** * @dev: DRM device this framebuffer belongs to */ struct drm_device *dev; /** * @head: Place on the &drm_mode_config.fb_list, access protected by * &drm_mode_config.fb_lock. */ struct list_head head; /** * @base: base modeset object structure, contains the reference count. */ struct drm_mode_object base; /** * @comm: Name of the process allocating the fb, used for fb dumping. */ char comm[TASK_COMM_LEN]; /** * @format: framebuffer format information */ const struct drm_format_info *format; /** * @funcs: framebuffer vfunc table */ const struct drm_framebuffer_funcs *funcs; /** * @pitches: Line stride per buffer. For userspace created objects this * is copied from drm_mode_fb_cmd2. */ unsigned int pitches[DRM_FORMAT_MAX_PLANES]; /** * @offsets: Offset from buffer start to the actual pixel data in bytes, * per buffer. For userspace created objects this is copied from * drm_mode_fb_cmd2. * * Note that this is a linear offset and does not take into account * tiling or buffer layout per @modifier. It is meant to be used when * the actual pixel data for this framebuffer plane starts at an offset, * e.g. when multiple planes are allocated within the same backing * storage buffer object. For tiled layouts this generally means its * @offsets must at least be tile-size aligned, but hardware often has * stricter requirements. * * This should not be used to specify x/y pixel offsets into the buffer * data (even for linear buffers). Specifying an x/y pixel offset is * instead done through the source rectangle in &struct drm_plane_state. */ unsigned int offsets[DRM_FORMAT_MAX_PLANES]; /** * @modifier: Data layout modifier.
This is used to describe * tiling, or also special layouts (like compression) of auxiliary * buffers. For userspace created object this is copied from * drm_mode_fb_cmd2. */ uint64_t modifier; /** * @width: Logical width of the visible area of the framebuffer, in * pixels. */ unsigned int width; /** * @height: Logical height of the visible area of the framebuffer, in * pixels. */ unsigned int height; /** * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or * DRM_MODE_FB_MODIFIERS. */ int flags; /** * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock. */ struct list_head filp_head; /** * @obj: GEM objects backing the framebuffer, one per plane (optional). * * This is used by the GEM framebuffer helpers, see e.g. * drm_gem_fb_create(). */ struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES]; }; #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs); struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, struct drm_file *file_priv, uint32_t id); void drm_framebuffer_remove(struct drm_framebuffer *fb); void drm_framebuffer_cleanup(struct drm_framebuffer *fb); void drm_framebuffer_unregister_private(struct drm_framebuffer *fb); /** * drm_framebuffer_get - acquire a framebuffer reference * @fb: DRM framebuffer * * This function increments the framebuffer's reference count. */ static inline void drm_framebuffer_get(struct drm_framebuffer *fb) { drm_mode_object_get(&fb->base); } /** * drm_framebuffer_put - release a framebuffer reference * @fb: DRM framebuffer * * This function decrements the framebuffer's reference count and frees the * framebuffer if the reference count drops to zero. */ static inline void drm_framebuffer_put(struct drm_framebuffer *fb) { drm_mode_object_put(&fb->base); } /** * drm_framebuffer_read_refcount - read the framebuffer reference count. * @fb: framebuffer * * This functions returns the framebuffer's reference count. */ static inline uint32_t drm_framebuffer_read_refcount(const struct drm_framebuffer *fb) { return kref_read(&fb->base.refcount); } /** * drm_framebuffer_assign - store a reference to the fb * @p: location to store framebuffer * @fb: new framebuffer (maybe NULL) * * This functions sets the location to store a reference to the framebuffer, * unreferencing the framebuffer that was previously stored in that location. */ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, struct drm_framebuffer *fb) { if (fb) drm_framebuffer_get(fb); if (*p) drm_framebuffer_put(*p); *p = fb; } /* * drm_for_each_fb - iterate over all framebuffers * @fb: the loop cursor * @dev: the DRM device * * Iterate over all framebuffers of @dev. User must hold * &drm_mode_config.fb_lock. */ #define drm_for_each_fb(fb, dev) \ for (WARN_ON(!mutex_is_locked(&(dev)->mode_config.fb_lock)), \ fb = list_first_entry(&(dev)->mode_config.fb_list, \ struct drm_framebuffer, head); \ &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) /** * struct drm_afbc_framebuffer - a special afbc frame buffer object * * A derived class of struct drm_framebuffer, dedicated for afbc use cases. */ struct drm_afbc_framebuffer { /** * @base: base framebuffer structure. 
*/ struct drm_framebuffer base; /** * @block_width: width of a single afbc block */ u32 block_width; /** * @block_height: height of a single afbc block */ u32 block_height; /** * @aligned_width: aligned frame buffer width */ u32 aligned_width; /** * @aligned_height: aligned frame buffer height */ u32 aligned_height; /** * @offset: offset of the first afbc header */ u32 offset; /** * @afbc_size: minimum size of afbc buffer */ u32 afbc_size; }; #define fb_to_afbc_fb(x) container_of(x, struct drm_afbc_framebuffer, base) #endif
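/*
 * Illustrative sketch, not part of the header above: how a driver-private
 * structure might hold a framebuffer reference across an asynchronous
 * disable, using only the helpers declared in this header. The struct
 * my_plane_state and my_plane_set_fb() names are hypothetical.
 */
#include <drm/drm_framebuffer.h>

struct my_plane_state {
	struct drm_framebuffer *fb;	/* holds one reference, or NULL */
};

static void my_plane_set_fb(struct my_plane_state *state,
			    struct drm_framebuffer *new_fb)
{
	/*
	 * drm_framebuffer_assign() takes a ref on new_fb (if non-NULL) and
	 * drops the ref on the previously stored fb (if any), so the old fb
	 * stays alive until this point even if userspace already removed it.
	 */
	drm_framebuffer_assign(&state->fb, new_fb);
}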
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct delayed_work timeout_work;
	struct delayed_work cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	refcount_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;
	int repwin;

	__be64 tid;
	u32 src_qp;
	u32 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
	u8 base_version;
};

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (refcount_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL, IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL, IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
	else {
		msg->ah = ah;
		msg->context[0] = ah;
	}

	return msg;
}

static void ack_ds_ack(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
		rdma_destroy_ah(mad_send_wc->send_buf->ah,
				RDMA_DESTROY_AH_SLEEPABLE);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
		ib_free_send_mad(msg);
	}
}

static void recv_timeout_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, timeout_work.work);
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
	struct mad_rmpp_recv *rmpp_recv =
		container_of(work, struct mad_rmpp_recv, cleanup_work.work);
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
	INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	refcount_set(&rmpp_recv->refcount, 1);
	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;
	rmpp_recv->repwin = 1;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	rmpp_recv->base_version = mad_hdr->base_version;
	return rmpp_recv;

error:
	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		refcount_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf *get_next_seg(struct list_head *rmpp_list,
						   struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf *find_seg_location(struct list_head *rmpp_list,
						 int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;
	bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
				    rmpp_recv->agent->qp_info->port_priv->port_num);

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
		data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
		pad = OPA_MGMT_RMPP_DATA -
			be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	} else {
		data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
		pad = IB_MGMT_RMPP_DATA -
			be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
		if (pad > IB_MGMT_RMPP_DATA || pad < 0)
			pad = 0;
	}

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc *complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	refcount_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = (mad_send_wr->send_buf.seg_count *
			  mad_send_wr->send_buf.seg_rmpp_size) -
			  mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
	struct mad_rmpp_recv *rmpp_recv;

	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
		rmpp_recv->repwin = newwin;
}

static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr) {
		if (!seg_num)
			process_ds_ack(agent, mad_recv_wc, newwin);
		goto out;	/* Unmatched or DS RMPP ACK */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
	    (mad_send_wr->timeout)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;		/* Repeated ACK for DS RMPP transaction */
	}

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries_left = mad_send_wr->max_retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
		spin_unlock_irqrestore(&agent->lock, flags);
		ack_ds_ack(agent, mad_recv_wc);
		return;
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
	struct mad_rmpp_recv *rmpp_recv;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;
	int newwin = 1;

	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
		goto out;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid != mad_hdr->tid ||
		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
		    rmpp_recv->class_version != mad_hdr->class_version ||
		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
			continue;

		if (rdma_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
			continue;

		if (rmpp_recv->slid == rdma_ah_get_dlid(&ah_attr)) {
			newwin = rmpp_recv->repwin;
			break;
		}
	}
	spin_unlock_irqrestore(&agent->lock, flags);
out:
	return newwin;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = init_newwin(mad_send_wr);

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;
	return IB_RMPP_RESULT_CONSUMED;
}
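/*
 * Illustrative sketch, not part of mad_rmpp.c above: the reassembled MAD
 * length computed by get_mad_len() is hdr_size + seg_num * data_size - pad,
 * where pad is how far the final segment's payload falls short of a full
 * segment. All numbers below are made up purely to exercise the formula.
 */
#include <stdio.h>

int main(void)
{
	int hdr_size = 36;	/* class header size, e.g. for SA MADs */
	int data_size = 220;	/* RMPP data bytes per 256-byte MAD segment */
	int seg_num = 3;	/* segments in the completed transfer */
	int last_paylen = 100;	/* paylen_newwin field of the last segment */
	int pad = data_size - last_paylen;

	if (pad > data_size || pad < 0)	/* same clamping as get_mad_len() */
		pad = 0;

	/* 36 + 3*220 - 120 = 576: two full segments plus a 100-byte tail */
	printf("mad_len = %d\n", hdr_size + seg_num * data_size - pad);
	return 0;
}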
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H

#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/uio.h>
#include <linux/workqueue.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/tcp_states.h>

#include <uapi/linux/dccp.h>

enum dccp_state {
	DCCP_OPEN	     = TCP_ESTABLISHED,
	DCCP_REQUESTING      = TCP_SYN_SENT,
	DCCP_LISTEN	     = TCP_LISTEN,
	DCCP_RESPOND	     = TCP_SYN_RECV,
	/*
	 * States involved in closing a DCCP connection:
	 * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
	 *
	 * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
	 *    a. Client has performed active-close, has sent a Close to the
	 *       server from state OPEN or PARTOPEN, and is waiting for the
	 *       final Reset (in this case, SOCK_DONE == 1).
	 *    b. Client is asked to perform passive-close, by receiving a
	 *       CloseReq in (PART)OPEN state. It sends a Close and waits for
	 *       final Reset (in this case, SOCK_DONE == 0).
	 *    c. Server performs an active-close as in (a), keeps TIMEWAIT state.
	 *
	 * 3) The following intermediate states are employed to give passively
	 *    closing nodes a chance to process their unread data:
	 *    - PASSIVE_CLOSE    (from OPEN => CLOSED) and
	 *    - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
	 */
	DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
	DCCP_PASSIVE_CLOSE   = TCP_CLOSE_WAIT,	/* any node receiving a Close */
	DCCP_CLOSING	     = TCP_CLOSING,
	DCCP_TIME_WAIT	     = TCP_TIME_WAIT,
	DCCP_CLOSED	     = TCP_CLOSE,
	DCCP_NEW_SYN_RECV    = TCP_NEW_SYN_RECV,
	DCCP_PARTOPEN	     = TCP_MAX_STATES,
	DCCP_PASSIVE_CLOSEREQ,			/* clients receiving CloseReq */
	DCCP_MAX_STATES
};

enum {
	DCCPF_OPEN	      = TCPF_ESTABLISHED,
	DCCPF_REQUESTING      = TCPF_SYN_SENT,
	DCCPF_LISTEN	      = TCPF_LISTEN,
	DCCPF_RESPOND	      = TCPF_SYN_RECV,
	DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
	DCCPF_CLOSING	      = TCPF_CLOSING,
	DCCPF_TIME_WAIT	      = TCPF_TIME_WAIT,
	DCCPF_CLOSED	      = TCPF_CLOSE,
	DCCPF_NEW_SYN_RECV    = TCPF_NEW_SYN_RECV,
	DCCPF_PARTOPEN	      = (1 << DCCP_PARTOPEN),
};

static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
{
	return (struct dccp_hdr *)skb_transport_header(skb);
}

static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
{
	skb_push(skb, headlen);
	skb_reset_transport_header(skb);
	return memset(skb_transport_header(skb), 0, headlen);
}

static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
{
	return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
}

static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
{
	return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
}

static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	return __dccp_basic_hdr_len(dh);
}

static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
{
	__u64 seq_nr = ntohs(dh->dccph_seq);

	if (dh->dccph_x != 0)
		seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low);
	else
		seq_nr += (u32)dh->dccph_seq2 << 16;

	return seq_nr;
}

static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
{
	return (struct dccp_hdr_request *)(skb_transport_header(skb) +
					   dccp_basic_hdr_len(skb));
}

static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
{
	return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
					    dccp_basic_hdr_len(skb));
}

static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
{
	const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);

	return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) +
		     ntohl(dhack->dccph_ack_nr_low);
}

static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
{
	return (struct dccp_hdr_response *)(skb_transport_header(skb) +
					    dccp_basic_hdr_len(skb));
}

static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
{
	return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
					 dccp_basic_hdr_len(skb));
}

static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
{
	return __dccp_basic_hdr_len(dh) +
	       dccp_packet_hdr_len(dh->dccph_type);
}

static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
{
	return __dccp_hdr_len(dccp_hdr(skb));
}

/**
 * struct dccp_request_sock - represent DCCP-specific connection request
 * @dreq_inet_rsk: structure inherited from
 * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
 * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
 * @dreq_isr: initial sequence number received in the first Request
 * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
 * @dreq_service: service code present on the Request (there is just one)
 * @dreq_featneg: feature negotiation options for this connection
 * The following two fields are analogous to the ones in dccp_sock:
 * @dreq_timestamp_echo: last received timestamp to echo (13.1)
 * @dreq_timestamp_time: the time of receiving the last @dreq_timestamp_echo
 */
struct dccp_request_sock {
	struct inet_request_sock dreq_inet_rsk;
	__u64			 dreq_iss;
	__u64			 dreq_gss;
	__u64			 dreq_isr;
	__u64			 dreq_gsr;
	__be32			 dreq_service;
	spinlock_t		 dreq_lock;
	struct list_head	 dreq_featneg;
	__u32			 dreq_timestamp_echo;
	__u32			 dreq_timestamp_time;
};

static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
{
	return (struct dccp_request_sock *)req;
}

extern struct inet_timewait_death_row dccp_death_row;

extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
			      struct sk_buff *skb);

struct dccp_options_received {
	u64 dccpor_ndp:48;
	u32 dccpor_timestamp;
	u32 dccpor_timestamp_echo;
	u32 dccpor_elapsed_time;
};

struct ccid;

enum dccp_role {
	DCCP_ROLE_UNDEFINED,
	DCCP_ROLE_LISTEN,
	DCCP_ROLE_CLIENT,
	DCCP_ROLE_SERVER,
};

struct dccp_service_list {
	__u32	dccpsl_nr;
	__be32	dccpsl_list[];
};

#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
#define DCCP_SERVICE_CODE_IS_ABSENT 0

static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
					 const __be32 service)
{
	if (likely(sl != NULL)) {
		u32 i = sl->dccpsl_nr;

		while (i--)
			if (sl->dccpsl_list[i] == service)
				return true;
	}
	return false;
}

struct dccp_ackvec;

/**
 * struct dccp_sock - DCCP socket state
 *
 * @dccps_swl - sequence number window low
 * @dccps_swh - sequence number window high
 * @dccps_awl - acknowledgement number window low
 * @dccps_awh - acknowledgement number window high
 * @dccps_iss - initial sequence number sent
 * @dccps_isr - initial sequence number received
 * @dccps_osr - first OPEN sequence number received
 * @dccps_gss - greatest sequence number sent
 * @dccps_gsr - greatest valid sequence number received
 * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
 * @dccps_service - first (passive sock) or unique (active sock) service code
 * @dccps_service_list - second .. last service code on passive socket
 * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
 * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
 * @dccps_l_ack_ratio - feature-local Ack Ratio
 * @dccps_r_ack_ratio - feature-remote Ack Ratio
 * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
 * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
 * @dccps_pcslen - sender partial checksum coverage (via sockopt)
 * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
 * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
 * @dccps_ndp_count - number of Non Data Packets since last data packet
 * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
 * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
 * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
 * @dccps_hc_rx_ackvec - rx half connection ack vector
 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
 * @dccps_options_received - parsed set of retrieved options
 * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
 * @dccps_tx_qlen - maximum length of the TX queue
 * @dccps_role - role of this sock, one of %dccp_role
 * @dccps_hc_rx_insert_options - receiver wants to add options when acking
 * @dccps_hc_tx_insert_options - sender wants to add options when sending
 * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
 * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
 * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
 * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
 * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
 */
struct dccp_sock {
	/* inet_connection_sock has to be the first member of dccp_sock */
	struct inet_connection_sock	dccps_inet_connection;
#define dccps_syn_rtt			dccps_inet_connection.icsk_ack.lrcvtime
	__u64				dccps_swl;
	__u64				dccps_swh;
	__u64				dccps_awl;
	__u64				dccps_awh;
	__u64				dccps_iss;
	__u64				dccps_isr;
	__u64				dccps_osr;
	__u64				dccps_gss;
	__u64				dccps_gsr;
	__u64				dccps_gar;
	__be32				dccps_service;
	__u32				dccps_mss_cache;
	struct dccp_service_list	*dccps_service_list;
	__u32				dccps_timestamp_echo;
	__u32				dccps_timestamp_time;
	__u16				dccps_l_ack_ratio;
	__u16				dccps_r_ack_ratio;
	__u64				dccps_l_seq_win:48;
	__u64				dccps_r_seq_win:48;
	__u8				dccps_pcslen:4;
	__u8				dccps_pcrlen:4;
	__u8				dccps_send_ndp_count:1;
	__u64				dccps_ndp_count:48;
	unsigned long			dccps_rate_last;
	struct list_head		dccps_featneg;
	struct dccp_ackvec		*dccps_hc_rx_ackvec;
	struct ccid			*dccps_hc_rx_ccid;
	struct ccid			*dccps_hc_tx_ccid;
	struct dccp_options_received	dccps_options_received;
	__u8				dccps_qpolicy;
	__u32				dccps_tx_qlen;
	enum dccp_role			dccps_role:2;
	__u8				dccps_hc_rx_insert_options:1;
	__u8				dccps_hc_tx_insert_options:1;
	__u8				dccps_server_timewait:1;
	__u8				dccps_sync_scheduled:1;
	struct tasklet_struct		dccps_xmitlet;
	struct timer_list		dccps_xmit_timer;
};

#define dccp_sk(ptr) container_of_const(ptr, struct dccp_sock, \
					dccps_inet_connection.icsk_inet.sk)

static inline const char *dccp_role(const struct sock *sk)
{
	switch (dccp_sk(sk)->dccps_role) {
	case DCCP_ROLE_UNDEFINED: return "undefined";
	case DCCP_ROLE_LISTEN:	  return "listen";
	case DCCP_ROLE_SERVER:	  return "server";
	case DCCP_ROLE_CLIENT:	  return "client";
	}
	return NULL;
}

extern void dccp_syn_ack_timeout(const struct request_sock *req);

#endif /* _LINUX_DCCP_H */
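/*
 * Illustrative sketch, not part of dccp.h above: how dccp_hdr_seq()
 * assembles the sequence number. With the extended-sequence bit X=1 the
 * high 16 bits come from dccph_seq and the low 32 from the header
 * extension; with X=0 only a 24-bit number exists, built from dccph_seq
 * (low 16 bits) and dccph_seq2 (high 8 bits). Values are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* X = 1: 48-bit sequence number */
	uint64_t seq_high16 = 0x1234;		/* host-order dccph_seq */
	uint64_t seq_low32 = 0x89abcdefUL;	/* host-order dccph_seq_low */
	uint64_t seq48 = (seq_high16 << 32) + seq_low32;

	/* X = 0: 24-bit sequence number */
	uint64_t seq16 = 0xcdef;		/* host-order dccph_seq */
	uint64_t seq2 = 0xab;			/* dccph_seq2 */
	uint64_t seq24 = seq16 + (seq2 << 16);

	/* prints 123489abcdef and abcdef */
	printf("seq48=%llx seq24=%llx\n",
	       (unsigned long long)seq48, (unsigned long long)seq24);
	return 0;
}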
// SPDX-License-Identifier: GPL-2.0-only
/*
 * klist.c - Routines for manipulating klists.
 *
 * Copyright (C) 2005 Patrick Mochel
 *
 * This klist interface provides a couple of structures that wrap around
 * struct list_head to provide explicit list "head" (struct klist) and list
 * "node" (struct klist_node) objects. For struct klist, a spinlock is
 * included that protects access to the actual list itself. struct
 * klist_node provides a pointer to the klist that owns it and a kref
 * reference count that indicates the number of current users of that node
 * in the list.
 *
 * The entire point is to provide an interface for iterating over a list
 * that is safe and allows for modification of the list during the
 * iteration (e.g. insertion and removal), including modification of the
 * current node on the list.
 *
 * It works using a 3rd object type - struct klist_iter - that is declared
 * and initialized before an iteration. klist_next() is used to acquire the
 * next element in the list. It returns NULL if there are no more items.
 * Internally, that routine takes the klist's lock, decrements the
 * reference count of the previous klist_node and increments the count of
 * the next klist_node. It then drops the lock and returns.
 *
 * There are primitives for adding and removing nodes to/from a klist.
 * When deleting, klist_del() will simply decrement the reference count.
 * Only when the count goes to 0 is the node removed from the list.
 * klist_remove() will try to delete the node from the list and block until
 * it is actually removed. This is useful for objects (like devices) that
 * have been removed from the system and must be freed (but must wait until
 * all accessors have finished).
 */

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD

static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)
		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* no knode deserves to start its life dead */
	WARN_ON(knode_dead(knode));
}

static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Initialises the klist structure. If the klist_node structures are
 * going to be embedded in refcounted objects (necessary for safe
 * deletion) then the get/put arguments are used to initialise
 * functions that take and release references on the embedding
 * objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
	k->get = get;
	k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);

static void add_head(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void add_tail(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void klist_node_init(struct klist *k, struct klist_node *n)
{
	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (k->get)
		k->get(n);
}

/**
 * klist_add_head - Initialize a klist_node and add it to front.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);

/**
 * klist_add_tail - Initialize a klist_node and add it to back.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);

/**
 * klist_add_behind - Init a klist_node and add it after an existing node
 * @n: node we're adding.
 * @pos: node to put @n after
 */
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);

/**
 * klist_add_before - Init a klist_node and add it before an existing node
 * @n: node we're adding.
 * @pos: node to put @n before
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);

struct klist_waiter {
	struct list_head list;
	struct klist_node *node;
	struct task_struct *process;
	int woken;
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);

static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;

		list_del(&waiter->list);
		waiter->woken = 1;
		mb();
		wake_up_process(waiter->process);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}

static int klist_dec_and_del(struct klist_node *n)
{
	return kref_put(&n->n_ref, klist_release);
}

static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}

/**
 * klist_del - Decrement the reference count of node and try to remove.
 * @n: node we're deleting.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);

/**
 * klist_remove - Decrement the refcount of node and wait for it to go away.
 * @n: node we're removing.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);

/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that we're testing.
 */
int klist_node_attached(struct klist_node *n)
{
	return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);

/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter we're filling.
 * @n: node to start with.
 *
 * Similar to klist_iter_init(), but starts the action off with @n,
 * instead of with the list head.
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	i->i_klist = k;
	i->i_cur = NULL;
	if (n && kref_get_unless_zero(&n->n_ref))
		i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);

/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter structure we're filling.
 *
 * Similar to klist_iter_init_node(), but starts with the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);

/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating over list, as it decrements the
 * refcount of the current node. Necessary in case iteration exited before
 * the end of the list was reached, and always good form.
 */
void klist_iter_exit(struct klist_iter *i)
{
	if (i->i_cur) {
		klist_put(i->i_cur, false);
		i->i_cur = NULL;
	}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);

static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}

/**
 * klist_prev - Ante up prev node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the prev node, increment its reference
 * count, drop the lock, and return that prev node.
 */
struct klist_node *klist_prev(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *prev;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		prev = to_klist_node(last->n_node.prev);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		prev = to_klist_node(i->i_klist->k_list.prev);

	i->i_cur = NULL;
	while (prev != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(prev))) {
			kref_get(&prev->n_ref);
			i->i_cur = prev;
			break;
		}
		prev = to_klist_node(prev->n_node.prev);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);

/**
 * klist_next - Ante up next node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the next node, increment its reference
 * count, drop the lock, and return that next node.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		next = to_klist_node(last->n_node.next);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);
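/*
 * Illustrative sketch, not part of klist.c above: typical klist usage,
 * assuming a hypothetical struct item embedding a klist_node. No get/put
 * callbacks are installed, so the embedding objects must simply outlive
 * the iteration; refcounted objects would pass real get/put functions
 * to klist_init() instead.
 */
#include <linux/klist.h>
#include <linux/kernel.h>

struct item {
	struct klist_node knode;
	int value;
};

static struct klist my_list;

static void klist_usage_example(void)
{
	static struct item a = { .value = 1 }, b = { .value = 2 };
	struct klist_iter iter;
	struct klist_node *kn;

	klist_init(&my_list, NULL, NULL);
	klist_add_tail(&a.knode, &my_list);
	klist_add_tail(&b.knode, &my_list);

	/* Each klist_next() step holds a reference on the current node,
	 * so concurrent removal of that node is safe during iteration. */
	klist_iter_init(&my_list, &iter);
	while ((kn = klist_next(&iter)))
		(void)container_of(kn, struct item, knode)->value;
	klist_iter_exit(&iter);

	/* Drop the list's references; nodes go away when unused. */
	klist_del(&a.knode);
	klist_del(&b.knode);
}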
// SPDX-License-Identifier: GPL-2.0-or-later
/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
 * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
 * Copyright (C) 2002-2003 TiVo Inc.
 * Copyright (C) 2017-2018 ASIX
 * Copyright (C) 2018 Aquantia Corp.
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/linkmode.h>

#include "aqc111.h"

#define DRIVER_NAME "aqc111"

static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
                                u16 index, u16 size, void *data)
{
    int ret;

    ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
                               USB_RECIP_DEVICE, value, index, data, size);

    if (unlikely(ret < 0))
        netdev_warn(dev->net,
                    "Failed to read(0x%x) reg index 0x%04x: %d\n",
                    cmd, index, ret);

    return ret;
}

static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
                           u16 index, u16 size, void *data)
{
    int ret;

    ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
                          USB_RECIP_DEVICE, value, index, data, size);

    if (unlikely(ret < 0))
        netdev_warn(dev->net,
                    "Failed to read(0x%x) reg index 0x%04x: %d\n",
                    cmd, index, ret);

    return ret;
}

static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
                                  u16 index, u16 *data)
{
    int ret = 0;

    ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data);
    le16_to_cpus(data);

    return ret;
}

static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value,
                             u16 index, u16 *data)
{
    int ret = 0;

    ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data);
    le16_to_cpus(data);

    return ret;
}

static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                              u16 value, u16 index, u16 size,
                              const void *data)
{
    int err = -ENOMEM;
    void *buf = NULL;

    netdev_dbg(dev->net,
               "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n",
               __func__, cmd, reqtype, value, index, size);

    if (data) {
        buf = kmemdup(data, size, GFP_KERNEL);
        if (!buf)
            goto out;
    }

    err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                          cmd, reqtype, value, index, buf, size,
                          (cmd == AQ_PHY_POWER) ?
                          AQ_USB_PHY_SET_TIMEOUT : AQ_USB_SET_TIMEOUT);

    if (unlikely(err < 0))
        netdev_warn(dev->net,
                    "Failed to write(0x%x) reg index 0x%04x: %d\n",
                    cmd, index, err);
    kfree(buf);

out:
    return err;
}

static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
                                 u16 index, u16 size, void *data)
{
    int ret;

    ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
                             USB_RECIP_DEVICE, value, index, size, data);

    return ret;
}

static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
                            u16 index, u16 size, const void *data)
{
    int ret;

    if (usb_autopm_get_interface(dev->intf) < 0)
        return -ENODEV;

    ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
                             USB_RECIP_DEVICE, value, index, size, data);

    usb_autopm_put_interface(dev->intf);

    return ret;
}

static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
                                   u16 index, u16 *data)
{
    u16 tmp = *data;

    cpu_to_le16s(&tmp);

    return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
}

static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value,
                              u16 index, u16 *data)
{
    u16 tmp = *data;

    cpu_to_le16s(&tmp);

    return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
}

static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
                                   u16 index, u32 *data)
{
    u32 tmp = *data;

    cpu_to_le32s(&tmp);

    return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
}

static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value,
                              u16 index, u32 *data)
{
    u32 tmp = *data;

    cpu_to_le32s(&tmp);

    return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
}

static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
                                  u16 index, u16 size, void *data)
{
    return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
                                  USB_RECIP_DEVICE, value, index, data,
                                  size);
}

static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
                                    u16 index, u16 *data)
{
    u16 tmp = *data;

    cpu_to_le16s(&tmp);

    return aqc111_write_cmd_async(dev, cmd, value, index,
                                  sizeof(tmp), &tmp);
}

static void aqc111_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;

    /* Inherit standard device info */
    usbnet_get_drvinfo(net, info);
    strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
    snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
             aqc111_data->fw_ver.major,
             aqc111_data->fw_ver.minor,
             aqc111_data->fw_ver.rev);
    info->eedump_len = 0x00;
    info->regdump_len = 0x00;
}

static void aqc111_get_wol(struct net_device *net,
                           struct ethtool_wolinfo *wolinfo)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;

    wolinfo->supported = WAKE_MAGIC;
    wolinfo->wolopts = 0;

    if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP)
        wolinfo->wolopts |= WAKE_MAGIC;
}

static int aqc111_set_wol(struct net_device *net,
                          struct ethtool_wolinfo *wolinfo)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;

    if (wolinfo->wolopts & ~WAKE_MAGIC)
        return -EINVAL;

    aqc111_data->wol_flags = 0;
    if (wolinfo->wolopts & WAKE_MAGIC)
        aqc111_data->wol_flags |= AQ_WOL_FLAG_MP;

    return 0;
}

static void aqc111_speed_to_link_mode(u32 speed,
                                      struct ethtool_link_ksettings *elk)
{
    switch (speed) {
    case SPEED_5000:
        ethtool_link_ksettings_add_link_mode(elk, advertising,
                                             5000baseT_Full);
        break;
    case SPEED_2500:
        ethtool_link_ksettings_add_link_mode(elk, advertising,
                                             2500baseT_Full);
        break;
    case SPEED_1000:
        ethtool_link_ksettings_add_link_mode(elk, advertising,
                                             1000baseT_Full);
        break;
    case SPEED_100:
        ethtool_link_ksettings_add_link_mode(elk, advertising,
                                             100baseT_Full);
        break;
    }
}

static int aqc111_get_link_ksettings(struct net_device *net,
                                     struct ethtool_link_ksettings *elk)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    enum usb_device_speed usb_speed = dev->udev->speed;
    u32 speed = SPEED_UNKNOWN;

    ethtool_link_ksettings_zero_link_mode(elk, supported);
    ethtool_link_ksettings_add_link_mode(elk, supported,
                                         100baseT_Full);
    ethtool_link_ksettings_add_link_mode(elk, supported,
                                         1000baseT_Full);
    if (usb_speed == USB_SPEED_SUPER) {
        ethtool_link_ksettings_add_link_mode(elk, supported,
                                             2500baseT_Full);
        ethtool_link_ksettings_add_link_mode(elk, supported,
                                             5000baseT_Full);
    }
    ethtool_link_ksettings_add_link_mode(elk, supported, TP);
    ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg);

    elk->base.port = PORT_TP;
    elk->base.transceiver = XCVR_INTERNAL;

    elk->base.mdio_support = 0x00; /* Not supported */

    if (aqc111_data->autoneg)
        linkmode_copy(elk->link_modes.advertising,
                      elk->link_modes.supported);
    else
        aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk);

    elk->base.autoneg = aqc111_data->autoneg;

    switch (aqc111_data->link_speed) {
    case AQ_INT_SPEED_5G:
        speed = SPEED_5000;
        break;
    case AQ_INT_SPEED_2_5G:
        speed = SPEED_2500;
        break;
    case AQ_INT_SPEED_1G:
        speed = SPEED_1000;
        break;
    case AQ_INT_SPEED_100M:
        speed = SPEED_100;
        break;
    }
    elk->base.duplex = DUPLEX_FULL;
    elk->base.speed = speed;

    return 0;
}

static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;

    aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
    aqc111_data->phy_cfg |= AQ_PAUSE;
    aqc111_data->phy_cfg |= AQ_ASYM_PAUSE;
    aqc111_data->phy_cfg |= AQ_DOWNSHIFT;
    aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK;
    aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) &
                            AQ_DSH_RETRIES_MASK;

    if (autoneg == AUTONEG_ENABLE) {
        switch (speed) {
        case SPEED_5000:
            aqc111_data->phy_cfg |= AQ_ADV_5G;
            fallthrough;
        case SPEED_2500:
            aqc111_data->phy_cfg |= AQ_ADV_2G5;
            fallthrough;
        case SPEED_1000:
            aqc111_data->phy_cfg |= AQ_ADV_1G;
            fallthrough;
        case SPEED_100:
            aqc111_data->phy_cfg |= AQ_ADV_100M;
            /* fall-through */
        }
    } else {
        switch (speed) {
        case SPEED_5000:
            aqc111_data->phy_cfg |= AQ_ADV_5G;
            break;
        case SPEED_2500:
            aqc111_data->phy_cfg |= AQ_ADV_2G5;
            break;
        case SPEED_1000:
            aqc111_data->phy_cfg |= AQ_ADV_1G;
            break;
        case SPEED_100:
            aqc111_data->phy_cfg |= AQ_ADV_100M;
            break;
        }
    }

    aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg);
}

static int aqc111_set_link_ksettings(struct net_device *net,
                                     const struct ethtool_link_ksettings *elk)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    enum usb_device_speed usb_speed = dev->udev->speed;
    u8 autoneg = elk->base.autoneg;
    u32 speed = elk->base.speed;

    if (autoneg == AUTONEG_ENABLE) {
        if (aqc111_data->autoneg != AUTONEG_ENABLE) {
            aqc111_data->autoneg = AUTONEG_ENABLE;
            aqc111_data->advertised_speed =
                (usb_speed == USB_SPEED_SUPER) ?
                SPEED_5000 : SPEED_1000;
            aqc111_set_phy_speed(dev, aqc111_data->autoneg,
                                 aqc111_data->advertised_speed);
        }
    } else {
        if (speed != SPEED_100 &&
            speed != SPEED_1000 &&
            speed != SPEED_2500 &&
            speed != SPEED_5000 &&
            speed != SPEED_UNKNOWN)
            return -EINVAL;

        if (elk->base.duplex != DUPLEX_FULL)
            return -EINVAL;

        if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000)
            return -EINVAL;

        aqc111_data->autoneg = AUTONEG_DISABLE;
        if (speed != SPEED_UNKNOWN)
            aqc111_data->advertised_speed = speed;

        aqc111_set_phy_speed(dev, aqc111_data->autoneg,
                             aqc111_data->advertised_speed);
    }

    return 0;
}

static const struct ethtool_ops aqc111_ethtool_ops = {
    .get_drvinfo = aqc111_get_drvinfo,
    .get_wol = aqc111_get_wol,
    .set_wol = aqc111_set_wol,
    .get_msglevel = usbnet_get_msglevel,
    .set_msglevel = usbnet_set_msglevel,
    .get_link = ethtool_op_get_link,
    .get_link_ksettings = aqc111_get_link_ksettings,
    .set_link_ksettings = aqc111_set_link_ksettings
};

static int aqc111_change_mtu(struct net_device *net, int new_mtu)
{
    struct usbnet *dev = netdev_priv(net);
    u16 reg16 = 0;
    u8 buf[5];

    WRITE_ONCE(net->mtu, new_mtu);
    dev->hard_mtu = net->mtu + net->hard_header_len;

    aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                      2, &reg16);
    if (net->mtu > 1500)
        reg16 |= SFR_MEDIUM_JUMBO_EN;
    else
        reg16 &= ~SFR_MEDIUM_JUMBO_EN;

    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                       2, &reg16);

    if (dev->net->mtu > 12500) {
        memcpy(buf, &AQC111_BULKIN_SIZE[2], 5);
        /* RX bulk configuration */
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
                         5, 5, buf);
    }

    /* Set high low water level */
    if (dev->net->mtu <= 4500)
        reg16 = 0x0810;
    else if (dev->net->mtu <= 9500)
        reg16 = 0x1020;
    else if (dev->net->mtu <= 12500)
        reg16 = 0x1420;
    else
        reg16 = 0x1A20;

    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
                       2, &reg16);

    return 0;
}

static int aqc111_set_mac_addr(struct net_device *net, void *p)
{
    struct usbnet *dev = netdev_priv(net);
    int ret = 0;

    ret = eth_mac_addr(net, p);
    if (ret < 0)
        return ret;

    /* Set the MAC address */
    return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
                            ETH_ALEN, net->dev_addr);
}

static int aqc111_vlan_rx_kill_vid(struct net_device *net, __be16 proto,
                                   u16 vid)
{
    struct usbnet *dev = netdev_priv(net);
    u8 vlan_ctrl = 0;
    u16 reg16 = 0;
    u8 reg8 = 0;

    aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
    vlan_ctrl = reg8;

    /* Address */
    reg8 = (vid / 16);
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
    /* Data */
    reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
    aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
    reg16 &= ~(1 << (vid % 16));
    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
    reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);

    return 0;
}

static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto,
                                  u16 vid)
{
    struct usbnet *dev = netdev_priv(net);
    u8 vlan_ctrl = 0;
    u16 reg16 = 0;
    u8 reg8 = 0;

    aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
    vlan_ctrl = reg8;

    /* Address */
    reg8 = (vid / 16);
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
    /* Data */
    reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
    aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
    reg16 |= (1 << (vid % 16));
    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
    reg8 =
        vlan_ctrl | SFR_VLAN_CONTROL_WE;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);

    return 0;
}

static void aqc111_set_rx_mode(struct net_device *net)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    int mc_count = 0;

    mc_count = netdev_mc_count(net);

    aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL |
                            SFR_RX_CTL_AM);

    if (net->flags & IFF_PROMISC) {
        aqc111_data->rxctl |= SFR_RX_CTL_PRO;
    } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) {
        aqc111_data->rxctl |= SFR_RX_CTL_AMALL;
    } else if (!netdev_mc_empty(net)) {
        u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 };
        struct netdev_hw_addr *ha = NULL;
        u32 crc_bits = 0;

        netdev_for_each_mc_addr(ha, net) {
            crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
            m_filter[crc_bits >> 3] |= BIT(crc_bits & 7);
        }

        aqc111_write_cmd_async(dev, AQ_ACCESS_MAC,
                               SFR_MULTI_FILTER_ARRY,
                               AQ_MCAST_FILTER_SIZE,
                               AQ_MCAST_FILTER_SIZE, m_filter);

        aqc111_data->rxctl |= SFR_RX_CTL_AM;
    }

    aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
                             2, &aqc111_data->rxctl);
}

static int aqc111_set_features(struct net_device *net,
                               netdev_features_t features)
{
    struct usbnet *dev = netdev_priv(net);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    netdev_features_t changed = net->features ^ features;
    u16 reg16 = 0;
    u8 reg8 = 0;

    if (changed & NETIF_F_IP_CSUM) {
        aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
        reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
                         1, 1, &reg8);
    }

    if (changed & NETIF_F_IPV6_CSUM) {
        aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
        reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
                         1, 1, &reg8);
    }

    if (changed & NETIF_F_RXCSUM) {
        aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);
        if (features & NETIF_F_RXCSUM) {
            aqc111_data->rx_checksum = 1;
            reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
                      SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6);
        } else {
            aqc111_data->rx_checksum = 0;
            reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
                    SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
        }

        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL,
                         1, 1, &reg8);
    }

    if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
        if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
            u16 i = 0;

            for (i = 0; i < 256; i++) {
                /* Address */
                reg8 = i;
                aqc111_write_cmd(dev, AQ_ACCESS_MAC,
                                 SFR_VLAN_ID_ADDRESS,
                                 1, 1, &reg8);
                /* Data */
                aqc111_write16_cmd(dev, AQ_ACCESS_MAC,
                                   SFR_VLAN_ID_DATA0,
                                   2, &reg16);
                reg8 = SFR_VLAN_CONTROL_WE;
                aqc111_write_cmd(dev, AQ_ACCESS_MAC,
                                 SFR_VLAN_ID_CONTROL,
                                 1, 1, &reg8);
            }
            aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
                            1, 1, &reg8);
            reg8 |= SFR_VLAN_CONTROL_VFE;
            aqc111_write_cmd(dev, AQ_ACCESS_MAC,
                             SFR_VLAN_ID_CONTROL, 1,
                             1, &reg8);
        } else {
            aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
                            1, 1, &reg8);
            reg8 &= ~SFR_VLAN_CONTROL_VFE;
            aqc111_write_cmd(dev, AQ_ACCESS_MAC,
                             SFR_VLAN_ID_CONTROL, 1,
                             1, &reg8);
        }
    }

    return 0;
}

static const struct net_device_ops aqc111_netdev_ops = {
    .ndo_open = usbnet_open,
    .ndo_stop = usbnet_stop,
    .ndo_start_xmit = usbnet_start_xmit,
    .ndo_tx_timeout = usbnet_tx_timeout,
    .ndo_get_stats64 = dev_get_tstats64,
    .ndo_change_mtu = aqc111_change_mtu,
    .ndo_set_mac_address = aqc111_set_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid,
    .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid,
    .ndo_set_rx_mode = aqc111_set_rx_mode,
    .ndo_set_features = aqc111_set_features,
};

static int aqc111_read_perm_mac(struct usbnet *dev)
{
    u8 buf[ETH_ALEN];
    int ret;

    ret = aqc111_read_cmd(dev, AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf);
    if (ret < 0)
        goto out;

    ether_addr_copy(dev->net->perm_addr, buf);

    return 0;
out:
    return ret;
}

static void aqc111_read_fw_version(struct usbnet *dev,
                                   struct aqc111_data *aqc111_data)
{
    aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR,
                    1, 1, &aqc111_data->fw_ver.major);
    aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR,
                    1, 1, &aqc111_data->fw_ver.minor);
    aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV,
                    1, 1, &aqc111_data->fw_ver.rev);

    if (aqc111_data->fw_ver.major & 0x80)
        aqc111_data->fw_ver.major &= ~0x80;
}

static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
{
    struct usb_device *udev = interface_to_usbdev(intf);
    enum usb_device_speed usb_speed = udev->speed;
    struct aqc111_data *aqc111_data;
    int ret;

    /* Check if vendor configuration */
    if (udev->actconfig->desc.bConfigurationValue != 1) {
        usb_driver_set_configuration(udev, 1);
        return -ENODEV;
    }

    usb_reset_configuration(dev->udev);

    ret = usbnet_get_endpoints(dev, intf);
    if (ret < 0) {
        netdev_dbg(dev->net, "usbnet_get_endpoints failed");
        return ret;
    }

    aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL);
    if (!aqc111_data)
        return -ENOMEM;

    /* store aqc111_data pointer in device data field */
    dev->driver_priv = aqc111_data;

    /* Init the MAC address */
    ret = aqc111_read_perm_mac(dev);
    if (ret)
        goto out;

    eth_hw_addr_set(dev->net, dev->net->perm_addr);

    /* Set Rx urb size */
    dev->rx_urb_size = URB_SIZE;

    /* Set TX needed headroom & tailroom */
    dev->net->needed_headroom += sizeof(u64);
    dev->net->needed_tailroom += sizeof(u64);

    dev->net->max_mtu = 16334;

    dev->net->netdev_ops = &aqc111_netdev_ops;
    dev->net->ethtool_ops = &aqc111_ethtool_ops;

    if (usb_device_no_sg_constraint(dev->udev))
        dev->can_dma_sg = 1;

    dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
    dev->net->features |= AQ_SUPPORT_FEATURE;
    dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;

    netif_set_tso_max_size(dev->net, 65535);

    aqc111_read_fw_version(dev, aqc111_data);
    aqc111_data->autoneg = AUTONEG_ENABLE;
    aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ?
                                    SPEED_5000 : SPEED_1000;

    return 0;

out:
    kfree(aqc111_data);
    return ret;
}

static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u16 reg16;

    /* Force bz */
    reg16 = SFR_PHYPWR_RSTCTL_BZ;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
                            2, &reg16);
    reg16 = 0;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
                            2, &reg16);

    /* Power down ethernet PHY */
    aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
    aqc111_data->phy_cfg |= AQ_LOW_POWER;
    aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN;
    aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
                            &aqc111_data->phy_cfg);

    kfree(aqc111_data);
}

static void aqc111_status(struct usbnet *dev, struct urb *urb)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u64 *event_data = NULL;
    int link = 0;

    if (urb->actual_length < sizeof(*event_data))
        return;

    event_data = urb->transfer_buffer;
    le64_to_cpus(event_data);

    if (*event_data & AQ_LS_MASK)
        link = 1;
    else
        link = 0;

    aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >>
                              AQ_SPEED_SHIFT;
    aqc111_data->link = link;

    if (netif_carrier_ok(dev->net) != link)
        usbnet_defer_kevent(dev, EVENT_LINK_RESET);
}

static void aqc111_configure_rx(struct usbnet *dev,
                                struct aqc111_data *aqc111_data)
{
    enum usb_device_speed usb_speed = dev->udev->speed;
    u16 link_speed = 0, usb_host = 0;
    u8 buf[5] = { 0 };
    u8 queue_num = 0;
    u16 reg16 = 0;
    u8 reg8 = 0;

    buf[0] = 0x00;
    buf[1] = 0xF8;
    buf[2] = 0x07;
    switch (aqc111_data->link_speed) {
    case AQ_INT_SPEED_5G:
        link_speed = 5000;
        reg8 = 0x05;
        reg16 = 0x001F;
        break;
    case AQ_INT_SPEED_2_5G:
        link_speed = 2500;
        reg16 = 0x003F;
        break;
    case AQ_INT_SPEED_1G:
        link_speed = 1000;
        reg16 = 0x009F;
        break;
    case AQ_INT_SPEED_100M:
        link_speed = 100;
        queue_num = 1;
        reg16 = 0x063F;
        buf[1] = 0xFB;
        buf[2] = 0x4;
        break;
    }

    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0,
                     1, 1, &reg8);

    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf);

    switch (usb_speed) {
    case USB_SPEED_SUPER:
        usb_host = 3;
        break;
    case USB_SPEED_HIGH:
        usb_host = 2;
        break;
    case USB_SPEED_FULL:
    case USB_SPEED_LOW:
        usb_host = 1;
        queue_num = 0;
        break;
    default:
        usb_host = 0;
        break;
    }

    if (dev->net->mtu > 12500 && dev->net->mtu <= 16334)
        queue_num = 2; /* For Jumbo packet 16KB */

    memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5);
    /* RX bulk configuration */
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf);

    /* Set high low water level */
    if (dev->net->mtu <= 4500)
        reg16 = 0x0810;
    else if (dev->net->mtu <= 9500)
        reg16 = 0x1020;
    else if (dev->net->mtu <= 12500)
        reg16 = 0x1420;
    else if (dev->net->mtu <= 16334)
        reg16 = 0x1A20;

    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
                       2, &reg16);
    netdev_info(dev->net, "Link Speed %d, USB %d", link_speed, usb_host);
}

static void aqc111_configure_csum_offload(struct usbnet *dev)
{
    u8 reg8 = 0;

    if (dev->net->features & NETIF_F_RXCSUM) {
        reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
                SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
    }
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);

    reg8 = 0;
    if (dev->net->features & NETIF_F_IP_CSUM)
        reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP;

    if (dev->net->features & NETIF_F_IPV6_CSUM)
        reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;

    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
}

static int aqc111_link_reset(struct usbnet *dev)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u16 reg16 = 0;
    u8 reg8 = 0;

    if (aqc111_data->link == 1) { /* Link up */
        aqc111_configure_rx(dev, aqc111_data);

        /* Vlan Tag Filter */
        reg8 = SFR_VLAN_CONTROL_VSO;
        if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER)
            reg8 |= SFR_VLAN_CONTROL_VFE;

        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
                         1, 1, &reg8);

        reg8 = 0x0;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
                         1, 1, &reg8);

        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL,
                         1, 1, &reg8);

        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8);

        reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB;
        aqc111_data->rxctl = reg16;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);

        reg8 = SFR_RX_PATH_READY;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
                         1, 1, &reg8);

        reg8 = SFR_BULK_OUT_EFF_EN;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
                         1, 1, &reg8);

        reg16 = 0;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                           2, &reg16);

        reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                           2, &reg16);

        aqc111_configure_csum_offload(dev);

        aqc111_set_rx_mode(dev->net);

        aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                          2, &reg16);

        if (dev->net->mtu > 1500)
            reg16 |= SFR_MEDIUM_JUMBO_EN;

        reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN |
                 SFR_MEDIUM_TXFLOW_CTRLEN;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                           2, &reg16);

        aqc111_data->rxctl |= SFR_RX_CTL_START;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
                           2, &aqc111_data->rxctl);

        netif_carrier_on(dev->net);
    } else {
        aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                          2, &reg16);
        reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                           2, &reg16);

        aqc111_data->rxctl &= ~SFR_RX_CTL_START;
        aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
                           2, &aqc111_data->rxctl);

        reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
                         1, 1, &reg8);
        reg8 = SFR_BULK_OUT_EFF_EN;
        aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
                         1, 1, &reg8);

        netif_carrier_off(dev->net);
    }
    return 0;
}

static int aqc111_reset(struct usbnet *dev)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u8 reg8 = 0;

    dev->rx_urb_size = URB_SIZE;

    if (usb_device_no_sg_constraint(dev->udev))
        dev->can_dma_sg = 1;

    dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
    dev->net->features |= AQ_SUPPORT_FEATURE;
    dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;

    /* Power up ethernet PHY */
    aqc111_data->phy_cfg = AQ_PHY_POWER_EN;
    aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
                       &aqc111_data->phy_cfg);

    /* Set the MAC address */
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
                     ETH_ALEN, dev->net->dev_addr);

    reg8 = 0xFF;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8);

    reg8 = 0x0;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8);

    aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);
    reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC |
              SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF |
              SFR_MONITOR_MODE_RW_FLAG);
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);

    netif_carrier_off(dev->net);

    /* Phy advertise */
    aqc111_set_phy_speed(dev, aqc111_data->autoneg,
                         aqc111_data->advertised_speed);

    return 0;
}

static int aqc111_stop(struct usbnet *dev)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u16 reg16 = 0;

    aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                      2, &reg16);
    reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                       2, &reg16);
    reg16 = 0;
    aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2,
                       &reg16);

    /* Put PHY to low power */
    aqc111_data->phy_cfg |= AQ_LOW_POWER;
    aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
                       &aqc111_data->phy_cfg);

    netif_carrier_off(dev->net);

    return 0;
}

static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc)
{
    u32 pkt_type = 0;

    skb->ip_summed = CHECKSUM_NONE;
    /* checksum error bit is set */
    if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR)
        return;

    pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK;

    /* It must be a TCP or UDP packet with a valid checksum */
    if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP)
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
    struct aqc111_data *aqc111_data = dev->driver_priv;
    struct sk_buff *new_skb = NULL;
    u32 pkt_total_offset = 0;
    u64 *pkt_desc_ptr = NULL;
    u32 start_of_descs = 0;
    u32 desc_offset = 0; /* RX Header Offset */
    u16 pkt_count = 0;
    u64 desc_hdr = 0;
    u16 vlan_tag = 0;
    u32 skb_len;

    if (!skb)
        goto err;

    skb_len = skb->len;
    if (skb_len < sizeof(desc_hdr))
        goto err;

    /* RX Descriptor Header */
    skb_trim(skb, skb_len - sizeof(desc_hdr));
    desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));

    /* Check these packets */
    desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >>
                  AQ_RX_DH_DESC_OFFSET_SHIFT;
    pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK;
    start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr));

    /* self check descs position */
    if (start_of_descs != desc_offset)
        goto err;

    /* self check desc_offset from header and make sure that the
     * bounds of the metadata array are inside the SKB
     */
    if (pkt_count * 2 + desc_offset >= skb_len)
        goto err;

    /* Packets must not overlap the metadata array */
    skb_trim(skb, desc_offset);

    if (pkt_count == 0)
        goto err;

    /* Get the first RX packet descriptor */
    pkt_desc_ptr = (u64 *)(skb->data + desc_offset);

    while (pkt_count--) {
        u64 pkt_desc = le64_to_cpup(pkt_desc_ptr);
        u32 pkt_len_with_padd = 0;
        u32 pkt_len = 0;

        pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >>
                        AQ_RX_PD_LEN_SHIFT);
        pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8);

        pkt_total_offset += pkt_len_with_padd;
        if (pkt_total_offset > desc_offset ||
            (pkt_count == 0 && pkt_total_offset != desc_offset)) {
            goto err;
        }

        if (pkt_desc & AQ_RX_PD_DROP ||
            !(pkt_desc & AQ_RX_PD_RX_OK) ||
            pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) {
            skb_pull(skb, pkt_len_with_padd);

            /* Next RX Packet Descriptor */
            pkt_desc_ptr++;
            continue;
        }

        new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);

        if (!new_skb)
            goto err;

        skb_put(new_skb, pkt_len);
        memcpy(new_skb->data, skb->data, pkt_len);
        skb_pull(new_skb, AQ_RX_HW_PAD);

        if (aqc111_data->rx_checksum)
            aqc111_rx_checksum(new_skb, pkt_desc);

        if (pkt_desc & AQ_RX_PD_VLAN) {
            vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT;
            __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
                                   vlan_tag & VLAN_VID_MASK);
        }

        usbnet_skb_return(dev, new_skb);
        if (pkt_count == 0)
            break;

        skb_pull(skb, pkt_len_with_padd);

        /* Next RX Packet Header */
        pkt_desc_ptr++;

        new_skb = NULL;
    }

    return 1;

err:
    return 0;
}

static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev,
                                       struct sk_buff *skb, gfp_t flags)
{
    int frame_size = dev->maxpacket;
    struct sk_buff *new_skb = NULL;
    u64 *tx_desc_ptr = NULL;
    int padding_size = 0;
    int headroom = 0;
    int tailroom = 0;
    u64 tx_desc = 0;
    u16 tci = 0;

    /* Length of actual data */
    tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK;

    /* TSO MSS */
    tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) <<
               AQ_TX_DESC_MSS_SHIFT;

    headroom = (skb->len + sizeof(tx_desc)) % 8;
    if (headroom != 0)
        padding_size = 8 - headroom;

    if (((skb->len +
          sizeof(tx_desc) + padding_size) % frame_size) == 0) {
        padding_size += 8;
        tx_desc |= AQ_TX_DESC_DROP_PADD;
    }

    /* Vlan Tag */
    if (vlan_get_tag(skb, &tci) >= 0) {
        tx_desc |= AQ_TX_DESC_VLAN;
        tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) <<
                   AQ_TX_DESC_VLAN_SHIFT;
    }

    if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) &&
        skb_linearize(skb))
        return NULL;

    headroom = skb_headroom(skb);
    tailroom = skb_tailroom(skb);

    if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) {
        new_skb = skb_copy_expand(skb, sizeof(tx_desc),
                                  padding_size, flags);
        dev_kfree_skb_any(skb);
        skb = new_skb;
        if (!skb)
            return NULL;
    }
    if (padding_size != 0)
        skb_put_zero(skb, padding_size);
    /* Copy TX header */
    tx_desc_ptr = skb_push(skb, sizeof(tx_desc));
    *tx_desc_ptr = cpu_to_le64(tx_desc);

    usbnet_set_skb_tx_stats(skb, 1, 0);

    return skb;
}

static const struct driver_info aqc111_info = {
    .description = "Aquantia AQtion USB to 5GbE Controller",
    .bind = aqc111_bind,
    .unbind = aqc111_unbind,
    .status = aqc111_status,
    .link_reset = aqc111_link_reset,
    .reset = aqc111_reset,
    .stop = aqc111_stop,
    .flags = FLAG_ETHER | FLAG_FRAMING_AX |
             FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
    .rx_fixup = aqc111_rx_fixup,
    .tx_fixup = aqc111_tx_fixup,
};

#define ASIX111_DESC \
    "ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter"

static const struct driver_info asix111_info = {
    .description = ASIX111_DESC,
    .bind = aqc111_bind,
    .unbind = aqc111_unbind,
    .status = aqc111_status,
    .link_reset = aqc111_link_reset,
    .reset = aqc111_reset,
    .stop = aqc111_stop,
    .flags = FLAG_ETHER | FLAG_FRAMING_AX |
             FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
    .rx_fixup = aqc111_rx_fixup,
    .tx_fixup = aqc111_tx_fixup,
};

#undef ASIX111_DESC

#define ASIX112_DESC \
    "ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter"

static const struct driver_info asix112_info = {
    .description = ASIX112_DESC,
    .bind = aqc111_bind,
    .unbind = aqc111_unbind,
    .status = aqc111_status,
    .link_reset = aqc111_link_reset,
    .reset = aqc111_reset,
    .stop = aqc111_stop,
    .flags = FLAG_ETHER | FLAG_FRAMING_AX |
             FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
    .rx_fixup = aqc111_rx_fixup,
    .tx_fixup = aqc111_tx_fixup,
};

#undef ASIX112_DESC

static const struct driver_info trendnet_info = {
    .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
    .bind = aqc111_bind,
    .unbind = aqc111_unbind,
    .status = aqc111_status,
    .link_reset = aqc111_link_reset,
    .reset = aqc111_reset,
    .stop = aqc111_stop,
    .flags = FLAG_ETHER | FLAG_FRAMING_AX |
             FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
    .rx_fixup = aqc111_rx_fixup,
    .tx_fixup = aqc111_tx_fixup,
};

static const struct driver_info qnap_info = {
    .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
    .bind = aqc111_bind,
    .unbind = aqc111_unbind,
    .status = aqc111_status,
    .link_reset = aqc111_link_reset,
    .reset = aqc111_reset,
    .stop = aqc111_stop,
    .flags = FLAG_ETHER | FLAG_FRAMING_AX |
             FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
    .rx_fixup = aqc111_rx_fixup,
    .tx_fixup = aqc111_tx_fixup,
};

static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
{
    struct usbnet *dev = usb_get_intfdata(intf);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u16 temp_rx_ctrl = 0x00;
    u16 reg16;
    u8 reg8;

    usbnet_suspend(intf, message);

    aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
    temp_rx_ctrl = reg16;
    /* Stop RX operations */
    reg16 &= ~SFR_RX_CTL_START;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
    /* Force bz */
    aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
                           2, &reg16);
    reg16 |= SFR_PHYPWR_RSTCTL_BZ;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
                            2, &reg16);

    reg8 = SFR_BULK_OUT_EFF_EN;
    aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
                          1, 1, &reg8);

    temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK |
                      SFR_RX_CTL_AP | SFR_RX_CTL_AM);
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
                            2, &temp_rx_ctrl);

    reg8 = 0x00;
    aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
                          1, 1, &reg8);

    if (aqc111_data->wol_flags) {
        struct aqc111_wol_cfg wol_cfg;

        memset(&wol_cfg, 0, sizeof(struct aqc111_wol_cfg));

        aqc111_data->phy_cfg |= AQ_WOL;
        ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr);
        wol_cfg.flags = aqc111_data->wol_flags;

        temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START);
        aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
                                2, &temp_rx_ctrl);
        reg8 = 0x00;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
                              1, 1, &reg8);
        reg8 = SFR_BMRX_DMA_EN;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
                              1, 1, &reg8);
        reg8 = SFR_RX_PATH_READY;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
                              1, 1, &reg8);
        reg8 = 0x07;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
                              1, 1, &reg8);
        reg8 = 0x00;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
                              SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8);
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
                              SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8);
        reg8 = 0xFF;
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE,
                              1, 1, &reg8);
        aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG,
                              1, 1, &reg8);

        aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
                               SFR_MEDIUM_STATUS_MODE, 2, &reg16);
        reg16 |= SFR_MEDIUM_RECEIVE_EN;
        aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
                                SFR_MEDIUM_STATUS_MODE, 2, &reg16);

        aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0,
                         WOL_CFG_SIZE, &wol_cfg);
        aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
                           &aqc111_data->phy_cfg);
    } else {
        aqc111_data->phy_cfg |= AQ_LOW_POWER;
        aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
                           &aqc111_data->phy_cfg);

        /* Disable RX path */
        aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
                               SFR_MEDIUM_STATUS_MODE, 2, &reg16);
        reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
        aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
                                SFR_MEDIUM_STATUS_MODE, 2, &reg16);
    }

    return 0;
}

static int aqc111_resume(struct usb_interface *intf)
{
    struct usbnet *dev = usb_get_intfdata(intf);
    struct aqc111_data *aqc111_data = dev->driver_priv;
    u16 reg16;
    u8 reg8;

    netif_carrier_off(dev->net);

    /* Power up ethernet PHY */
    aqc111_data->phy_cfg |= AQ_PHY_POWER_EN;
    aqc111_data->phy_cfg &= ~AQ_LOW_POWER;
    aqc111_data->phy_cfg &= ~AQ_WOL;

    reg8 = 0xFF;
    aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
                          1, 1, &reg8);
    /* Configure RX control register => start operation */
    reg16 = aqc111_data->rxctl;
    reg16 &= ~SFR_RX_CTL_START;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);

    reg16 |= SFR_RX_CTL_START;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);

    aqc111_set_phy_speed(dev, aqc111_data->autoneg,
                         aqc111_data->advertised_speed);

    aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                           2, &reg16);
    reg16 |= SFR_MEDIUM_RECEIVE_EN;
    aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
                            2, &reg16);
    reg8 = SFR_RX_PATH_READY;
    aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
                          1, 1, &reg8);
    reg8 = 0x0;
    aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
                     1, 1, &reg8);

    return usbnet_resume(intf);
}

#define AQC111_USB_ETH_DEV(vid, pid, table) \
    USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \
    .driver_info = (unsigned long)&(table) \
}, \
{ \
    USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \
                                  USB_CLASS_COMM, \
                                  USB_CDC_SUBCLASS_ETHERNET, \
                                  USB_CDC_PROTO_NONE), \
    .driver_info = (unsigned long)&(table),

static const struct usb_device_id products[] = {
    {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
    {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
    {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
    {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
    {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
    { },/* END */
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver aq_driver = {
    .name = "aqc111",
    .id_table = products,
    .probe = usbnet_probe,
    .suspend = aqc111_suspend,
    .resume = aqc111_resume,
    .disconnect = usbnet_disconnect,
};

module_usb_driver(aq_driver);

MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers");
MODULE_LICENSE("GPL");
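/*
 * Illustrative worked example, not part of the driver: the TX URB sizing
 * used by aqc111_tx_fixup() above. Each transfer is an 8-byte little-endian
 * descriptor followed by the frame and zero padding, sized so the total is
 * a multiple of 8; when the total would also be an exact multiple of the
 * bulk endpoint packet size, 8 extra pad bytes are appended and
 * AQ_TX_DESC_DROP_PADD is set so the device discards them (apparently so
 * a transfer never lands exactly on a packet boundary). The helper name
 * below is hypothetical and only reproduces that size computation.
 */
#if 0 /* example only */
static unsigned int example_tx_padding(unsigned int skb_len,
                                       unsigned int frame_size)
{
    const unsigned int desc = 8; /* sizeof(tx_desc) */
    unsigned int pad = 0;
    unsigned int rem = (skb_len + desc) % 8;

    if (rem != 0)
        pad = 8 - rem; /* align descriptor + data to 8 bytes */

    if (((skb_len + desc + pad) % frame_size) == 0)
        pad += 8; /* matches the AQ_TX_DESC_DROP_PADD case above */

    return pad;
}
#endif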
// SPDX-License-Identifier: GPL-2.0
/*
 * Written for linux by Johan Myreen as a translation from
 * the assembly version by Linus (with diacriticals added)
 *
 * Some additional features added by Christoph Niemann (ChN), March 1993
 *
 * Loadable keymaps by Risto Kankkunen, May 1993
 *
 * Diacriticals redone & other small changes, aeb@cwi.nl, June 1993
 * Added decr/incr_console, dynamic keymaps, Unicode support,
 * dynamic function/string keys, led setting, Sept 1994
 * `Sticky' modifier keys, 951006.
 *
 * 11-11-96: SAK should now work in the raw mode (Martin Mares)
 *
 * Modified to provide 'generic' keyboard support by Hamish Macdonald
 * Merge with the m68k keyboard driver and split-off of the PC low-level
 * parts by Geert Uytterhoeven, May 1997
 *
 * 27-05-97: Added support for the Magic SysRq Key (Martin Mares)
 * 30-07-98: Dead keys redone, aeb@cwi.nl.
 * 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/consolemap.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/kbd_diacr.h>
#include <linux/kbd_kern.h>
#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>
#include <linux/uaccess.h>
#include <linux/vt_kern.h>

#include <asm/irq_regs.h>

/*
 * Exported functions/variables
 */
#define KBD_DEFMODE (BIT(VC_REPEAT) | BIT(VC_META))

#if defined(CONFIG_X86) || defined(CONFIG_PARISC)
#include <asm/kbdleds.h>
#else
static inline int kbd_defleds(void)
{
    return 0;
}
#endif

#define KBD_DEFLOCK 0

/*
 * Handler Tables.
 */

#define K_HANDLERS\
    k_self, k_fn, k_spec, k_pad,\
    k_dead, k_cons, k_cur, k_shift,\
    k_meta, k_ascii, k_lock, k_lowercase,\
    k_slock, k_dead2, k_brl, k_ignore

typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
                            char up_flag);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };

#define FN_HANDLERS\
    fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
    fn_show_state, fn_send_intr, fn_lastcons, fn_caps_toggle,\
    fn_num, fn_hold, fn_scroll_forw, fn_scroll_back,\
    fn_boot_it, fn_caps_on, fn_compose, fn_SAK,\
    fn_dec_console, fn_inc_console, fn_spawn_con, fn_bare_num

typedef void (fn_handler_fn)(struct vc_data *vc);
static fn_handler_fn FN_HANDLERS;
static fn_handler_fn *fn_handler[] = { FN_HANDLERS };

/*
 * Variables exported for vt_ioctl.c
 */
struct vt_spawn_console vt_spawn_con = {
    .lock = __SPIN_LOCK_UNLOCKED(vt_spawn_con.lock),
    .pid = NULL,
    .sig = 0,
};

/*
 * Internal Data.
 */

static struct kbd_struct kbd_table[MAX_NR_CONSOLES];
static struct kbd_struct *kbd = kbd_table;

/* maximum values each key_handler can handle */
static const unsigned char max_vals[] = {
    [ KT_LATIN ] = 255,
    [ KT_FN ] = ARRAY_SIZE(func_table) - 1,
    [ KT_SPEC ] = ARRAY_SIZE(fn_handler) - 1,
    [ KT_PAD ] = NR_PAD - 1,
    [ KT_DEAD ] = NR_DEAD - 1,
    [ KT_CONS ] = 255,
    [ KT_CUR ] = 3,
    [ KT_SHIFT ] = NR_SHIFT - 1,
    [ KT_META ] = 255,
    [ KT_ASCII ] = NR_ASCII - 1,
    [ KT_LOCK ] = NR_LOCK - 1,
    [ KT_LETTER ] = 255,
    [ KT_SLOCK ] = NR_LOCK - 1,
    [ KT_DEAD2 ] = 255,
    [ KT_BRL ] = NR_BRL - 1,
};

static const int NR_TYPES = ARRAY_SIZE(max_vals);

static void kbd_bh(struct tasklet_struct *unused);
static DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh);

static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static DECLARE_BITMAP(key_down, KEY_CNT); /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;

/* Handles a number being assembled on the number pad */
static bool npadch_active;
static unsigned int npadch_value;

static unsigned int diacr;
static bool rep; /* flag telling character repeat */

static int shift_state = 0;

static unsigned int ledstate = -1U; /* undefined */
static unsigned char ledioctl;
static bool vt_switch;

/*
 * Notifier list for console keyboard events
 */
static ATOMIC_NOTIFIER_HEAD(keyboard_notifier_list);

int register_keyboard_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_register(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_keyboard_notifier);

int unregister_keyboard_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_unregister(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_keyboard_notifier);

/*
 * Translation of scancodes to keycodes. We set them on only the first
 * keyboard in the list that accepts the scancode and keycode.
 * Explanation for not choosing the first attached keyboard anymore:
 * USB keyboards for example have two event devices: one for all "normal"
 * keys and one for extra function keys (like "volume up", "make coffee",
 * etc.). So this means that scancodes for the extra function keys won't
 * be valid for the first event device, but will be for the second.
*/ struct getset_keycode_data { struct input_keymap_entry ke; int error; }; static int getkeycode_helper(struct input_handle *handle, void *data) { struct getset_keycode_data *d = data; d->error = input_get_keycode(handle->dev, &d->ke); return d->error == 0; /* stop as soon as we successfully get one */ } static int getkeycode(unsigned int scancode) { struct getset_keycode_data d = { .ke = { .flags = 0, .len = sizeof(scancode), .keycode = 0, }, .error = -ENODEV, }; memcpy(d.ke.scancode, &scancode, sizeof(scancode)); input_handler_for_each_handle(&kbd_handler, &d, getkeycode_helper); return d.error ?: d.ke.keycode; } static int setkeycode_helper(struct input_handle *handle, void *data) { struct getset_keycode_data *d = data; d->error = input_set_keycode(handle->dev, &d->ke); return d->error == 0; /* stop as soon as we successfully set one */ } static int setkeycode(unsigned int scancode, unsigned int keycode) { struct getset_keycode_data d = { .ke = { .flags = 0, .len = sizeof(scancode), .keycode = keycode, }, .error = -ENODEV, }; memcpy(d.ke.scancode, &scancode, sizeof(scancode)); input_handler_for_each_handle(&kbd_handler, &d, setkeycode_helper); return d.error; } /* * Making beeps and bells. Note that we prefer beeps to bells, but when * shutting the sound off we do both. */ static int kd_sound_helper(struct input_handle *handle, void *data) { unsigned int *hz = data; struct input_dev *dev = handle->dev; if (test_bit(EV_SND, dev->evbit)) { if (test_bit(SND_TONE, dev->sndbit)) { input_inject_event(handle, EV_SND, SND_TONE, *hz); if (*hz) return 0; } if (test_bit(SND_BELL, dev->sndbit)) input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0); } return 0; } static void kd_nosound(struct timer_list *unused) { static unsigned int zero; input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper); } static DEFINE_TIMER(kd_mksound_timer, kd_nosound); void kd_mksound(unsigned int hz, unsigned int ticks) { del_timer_sync(&kd_mksound_timer); input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper); if (hz && ticks) mod_timer(&kd_mksound_timer, jiffies + ticks); } EXPORT_SYMBOL(kd_mksound); /* * Setting the keyboard rate. */ static int kbd_rate_helper(struct input_handle *handle, void *data) { struct input_dev *dev = handle->dev; struct kbd_repeat *rpt = data; if (test_bit(EV_REP, dev->evbit)) { if (rpt[0].delay > 0) input_inject_event(handle, EV_REP, REP_DELAY, rpt[0].delay); if (rpt[0].period > 0) input_inject_event(handle, EV_REP, REP_PERIOD, rpt[0].period); rpt[1].delay = dev->rep[REP_DELAY]; rpt[1].period = dev->rep[REP_PERIOD]; } return 0; } int kbd_rate(struct kbd_repeat *rpt) { struct kbd_repeat data[2] = { *rpt }; input_handler_for_each_handle(&kbd_handler, data, kbd_rate_helper); *rpt = data[1]; /* Copy currently used settings */ return 0; } /* * Helper Functions. */ static void put_queue(struct vc_data *vc, int ch) { tty_insert_flip_char(&vc->port, ch, 0); tty_flip_buffer_push(&vc->port); } static void puts_queue(struct vc_data *vc, const char *cp) { tty_insert_flip_string(&vc->port, cp, strlen(cp)); tty_flip_buffer_push(&vc->port); } static void applkey(struct vc_data *vc, int key, char mode) { static char buf[] = { 0x1b, 'O', 0x00, 0x00 }; buf[1] = (mode ? 'O' : '['); buf[2] = key; puts_queue(vc, buf); } /* * Many other routines do put_queue, but I think either * they produce ASCII, or they produce some user-assigned * string, and in both cases we might assume that it is * in utf-8 already. 
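 */

/*
 * Worked example for the encoder below: U+20AC (the euro sign) falls in
 * the three-byte range, so to_utf8() emits
 *	0xe0 | (0x20ac >> 12)		= 0xe2
 *	0x80 | ((0x20ac >> 6) & 0x3f)	= 0x82
 *	0x80 | (0x20ac & 0x3f)		= 0xac
 * i.e. the byte sequence e2 82 ac.
 */

/*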
*/ static void to_utf8(struct vc_data *vc, uint c) { if (c < 0x80) /* 0******* */ put_queue(vc, c); else if (c < 0x800) { /* 110***** 10****** */ put_queue(vc, 0xc0 | (c >> 6)); put_queue(vc, 0x80 | (c & 0x3f)); } else if (c < 0x10000) { if (c >= 0xD800 && c < 0xE000) return; if (c == 0xFFFF) return; /* 1110**** 10****** 10****** */ put_queue(vc, 0xe0 | (c >> 12)); put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); put_queue(vc, 0x80 | (c & 0x3f)); } else if (c < 0x110000) { /* 11110*** 10****** 10****** 10****** */ put_queue(vc, 0xf0 | (c >> 18)); put_queue(vc, 0x80 | ((c >> 12) & 0x3f)); put_queue(vc, 0x80 | ((c >> 6) & 0x3f)); put_queue(vc, 0x80 | (c & 0x3f)); } } /* FIXME: review locking for vt.c callers */ static void set_leds(void) { tasklet_schedule(&keyboard_tasklet); } /* * Called after returning from RAW mode or when changing consoles - recompute * shift_down[] and shift_state from key_down[] maybe called when keymap is * undefined, so that shiftkey release is seen. The caller must hold the * kbd_event_lock. */ static void do_compute_shiftstate(void) { unsigned int k, sym, val; shift_state = 0; memset(shift_down, 0, sizeof(shift_down)); for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) { sym = U(key_maps[0][k]); if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK) continue; val = KVAL(sym); if (val == KVAL(K_CAPSSHIFT)) val = KVAL(K_SHIFT); shift_down[val]++; shift_state |= BIT(val); } } /* We still have to export this method to vt.c */ void vt_set_leds_compute_shiftstate(void) { unsigned long flags; /* * When VT is switched, the keyboard led needs to be set once. * Ensure that after the switch is completed, the state of the * keyboard LED is consistent with the state of the keyboard lock. */ vt_switch = true; set_leds(); spin_lock_irqsave(&kbd_event_lock, flags); do_compute_shiftstate(); spin_unlock_irqrestore(&kbd_event_lock, flags); } /* * We have a combining character DIACR here, followed by the character CH. * If the combination occurs in the table, return the corresponding value. * Otherwise, if CH is a space or equals DIACR, return DIACR. * Otherwise, conclude that DIACR was not combining after all, * queue it and return CH. */ static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch) { unsigned int d = diacr; unsigned int i; diacr = 0; if ((d & ~0xff) == BRL_UC_ROW) { if ((ch & ~0xff) == BRL_UC_ROW) return d | ch; } else { for (i = 0; i < accent_table_size; i++) if (accent_table[i].diacr == d && accent_table[i].base == ch) return accent_table[i].result; } if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d) return d; if (kbd->kbdmode == VC_UNICODE) to_utf8(vc, d); else { int c = conv_uni_to_8bit(d); if (c != -1) put_queue(vc, c); } return ch; } /* * Special function handlers */ static void fn_enter(struct vc_data *vc) { if (diacr) { if (kbd->kbdmode == VC_UNICODE) to_utf8(vc, diacr); else { int c = conv_uni_to_8bit(diacr); if (c != -1) put_queue(vc, c); } diacr = 0; } put_queue(vc, '\r'); if (vc_kbd_mode(kbd, VC_CRLF)) put_queue(vc, '\n'); } static void fn_caps_toggle(struct vc_data *vc) { if (rep) return; chg_vc_kbd_led(kbd, VC_CAPSLOCK); } static void fn_caps_on(struct vc_data *vc) { if (rep) return; set_vc_kbd_led(kbd, VC_CAPSLOCK); } static void fn_show_ptregs(struct vc_data *vc) { struct pt_regs *regs = get_irq_regs(); if (regs) show_regs(regs); } static void fn_hold(struct vc_data *vc) { struct tty_struct *tty = vc->port.tty; if (rep || !tty) return; /* * Note: SCROLLOCK will be set (cleared) by stop_tty (start_tty); * these routines are also activated by ^S/^Q. 
* (And SCROLLOCK can also be set by the ioctl KDSKBLED.) */ if (tty->flow.stopped) start_tty(tty); else stop_tty(tty); } static void fn_num(struct vc_data *vc) { if (vc_kbd_mode(kbd, VC_APPLIC)) applkey(vc, 'P', 1); else fn_bare_num(vc); } /* * Bind this to Shift-NumLock if you work in application keypad mode * but want to be able to change the NumLock flag. * Bind this to NumLock if you prefer that the NumLock key always * changes the NumLock flag. */ static void fn_bare_num(struct vc_data *vc) { if (!rep) chg_vc_kbd_led(kbd, VC_NUMLOCK); } static void fn_lastcons(struct vc_data *vc) { /* switch to the last used console, ChN */ set_console(last_console); } static void fn_dec_console(struct vc_data *vc) { int i, cur = fg_console; /* Currently switching? Queue this next switch relative to that. */ if (want_console != -1) cur = want_console; for (i = cur - 1; i != cur; i--) { if (i == -1) i = MAX_NR_CONSOLES - 1; if (vc_cons_allocated(i)) break; } set_console(i); } static void fn_inc_console(struct vc_data *vc) { int i, cur = fg_console; /* Currently switching? Queue this next switch relative to that. */ if (want_console != -1) cur = want_console; for (i = cur+1; i != cur; i++) { if (i == MAX_NR_CONSOLES) i = 0; if (vc_cons_allocated(i)) break; } set_console(i); } static void fn_send_intr(struct vc_data *vc) { tty_insert_flip_char(&vc->port, 0, TTY_BREAK); tty_flip_buffer_push(&vc->port); } static void fn_scroll_forw(struct vc_data *vc) { scrollfront(vc, 0); } static void fn_scroll_back(struct vc_data *vc) { scrollback(vc); } static void fn_show_mem(struct vc_data *vc) { show_mem(); } static void fn_show_state(struct vc_data *vc) { show_state(); } static void fn_boot_it(struct vc_data *vc) { ctrl_alt_del(); } static void fn_compose(struct vc_data *vc) { dead_key_next = true; } static void fn_spawn_con(struct vc_data *vc) { spin_lock(&vt_spawn_con.lock); if (vt_spawn_con.pid) if (kill_pid(vt_spawn_con.pid, vt_spawn_con.sig, 1)) { put_pid(vt_spawn_con.pid); vt_spawn_con.pid = NULL; } spin_unlock(&vt_spawn_con.lock); } static void fn_SAK(struct vc_data *vc) { struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; schedule_work(SAK_work); } static void fn_null(struct vc_data *vc) { do_compute_shiftstate(); } /* * Special key handlers */ static void k_ignore(struct vc_data *vc, unsigned char value, char up_flag) { } static void k_spec(struct vc_data *vc, unsigned char value, char up_flag) { if (up_flag) return; if (value >= ARRAY_SIZE(fn_handler)) return; if ((kbd->kbdmode == VC_RAW || kbd->kbdmode == VC_MEDIUMRAW || kbd->kbdmode == VC_OFF) && value != KVAL(K_SAK)) return; /* SAK is allowed even in raw mode */ fn_handler[value](vc); } static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag) { pr_err("k_lowercase was called - impossible\n"); } static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag) { if (up_flag) return; /* no action, if this is a key release */ if (diacr) value = handle_diacr(vc, value); if (dead_key_next) { dead_key_next = false; diacr = value; return; } if (kbd->kbdmode == VC_UNICODE) to_utf8(vc, value); else { int c = conv_uni_to_8bit(value); if (c != -1) put_queue(vc, c); } } /* * Handle dead key. Note that we now may have several * dead keys modifying the same character. Very useful * for Vietnamese. */ static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag) { if (up_flag) return; diacr = (diacr ? 
handle_diacr(vc, value) : value); } static void k_self(struct vc_data *vc, unsigned char value, char up_flag) { k_unicode(vc, conv_8bit_to_uni(value), up_flag); } static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag) { k_deadunicode(vc, value, up_flag); } /* * Obsolete - for backwards compatibility only */ static void k_dead(struct vc_data *vc, unsigned char value, char up_flag) { static const unsigned char ret_diacr[NR_DEAD] = { '`', /* dead_grave */ '\'', /* dead_acute */ '^', /* dead_circumflex */ '~', /* dead_tilda */ '"', /* dead_diaeresis */ ',', /* dead_cedilla */ '_', /* dead_macron */ 'U', /* dead_breve */ '.', /* dead_abovedot */ '*', /* dead_abovering */ '=', /* dead_doubleacute */ 'c', /* dead_caron */ 'k', /* dead_ogonek */ 'i', /* dead_iota */ '#', /* dead_voiced_sound */ 'o', /* dead_semivoiced_sound */ '!', /* dead_belowdot */ '?', /* dead_hook */ '+', /* dead_horn */ '-', /* dead_stroke */ ')', /* dead_abovecomma */ '(', /* dead_abovereversedcomma */ ':', /* dead_doublegrave */ 'n', /* dead_invertedbreve */ ';', /* dead_belowcomma */ '$', /* dead_currency */ '@', /* dead_greek */ }; k_deadunicode(vc, ret_diacr[value], up_flag); } static void k_cons(struct vc_data *vc, unsigned char value, char up_flag) { if (up_flag) return; set_console(value); } static void k_fn(struct vc_data *vc, unsigned char value, char up_flag) { if (up_flag) return; if ((unsigned)value < ARRAY_SIZE(func_table)) { unsigned long flags; spin_lock_irqsave(&func_buf_lock, flags); if (func_table[value]) puts_queue(vc, func_table[value]); spin_unlock_irqrestore(&func_buf_lock, flags); } else pr_err("k_fn called with value=%d\n", value); } static void k_cur(struct vc_data *vc, unsigned char value, char up_flag) { static const char cur_chars[] = "BDCA"; if (up_flag) return; applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE)); } static void k_pad(struct vc_data *vc, unsigned char value, char up_flag) { static const char pad_chars[] = "0123456789+-*/\015,.?()#"; static const char app_map[] = "pqrstuvwxylSRQMnnmPQS"; if (up_flag) return; /* no action, if this is a key release */ /* kludge... 
shift forces cursor/number keys */ if (vc_kbd_mode(kbd, VC_APPLIC) && !shift_down[KG_SHIFT]) { applkey(vc, app_map[value], 1); return; } if (!vc_kbd_led(kbd, VC_NUMLOCK)) { switch (value) { case KVAL(K_PCOMMA): case KVAL(K_PDOT): k_fn(vc, KVAL(K_REMOVE), 0); return; case KVAL(K_P0): k_fn(vc, KVAL(K_INSERT), 0); return; case KVAL(K_P1): k_fn(vc, KVAL(K_SELECT), 0); return; case KVAL(K_P2): k_cur(vc, KVAL(K_DOWN), 0); return; case KVAL(K_P3): k_fn(vc, KVAL(K_PGDN), 0); return; case KVAL(K_P4): k_cur(vc, KVAL(K_LEFT), 0); return; case KVAL(K_P6): k_cur(vc, KVAL(K_RIGHT), 0); return; case KVAL(K_P7): k_fn(vc, KVAL(K_FIND), 0); return; case KVAL(K_P8): k_cur(vc, KVAL(K_UP), 0); return; case KVAL(K_P9): k_fn(vc, KVAL(K_PGUP), 0); return; case KVAL(K_P5): applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC)); return; } } put_queue(vc, pad_chars[value]); if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF)) put_queue(vc, '\n'); } static void k_shift(struct vc_data *vc, unsigned char value, char up_flag) { int old_state = shift_state; if (rep) return; /* * Mimic typewriter: * a CapsShift key acts like Shift but undoes CapsLock */ if (value == KVAL(K_CAPSSHIFT)) { value = KVAL(K_SHIFT); if (!up_flag) clr_vc_kbd_led(kbd, VC_CAPSLOCK); } if (up_flag) { /* * handle the case that two shift or control * keys are depressed simultaneously */ if (shift_down[value]) shift_down[value]--; } else shift_down[value]++; if (shift_down[value]) shift_state |= BIT(value); else shift_state &= ~BIT(value); /* kludge */ if (up_flag && shift_state != old_state && npadch_active) { if (kbd->kbdmode == VC_UNICODE) to_utf8(vc, npadch_value); else put_queue(vc, npadch_value & 0xff); npadch_active = false; } } static void k_meta(struct vc_data *vc, unsigned char value, char up_flag) { if (up_flag) return; if (vc_kbd_mode(kbd, VC_META)) { put_queue(vc, '\033'); put_queue(vc, value); } else put_queue(vc, value | BIT(7)); } static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag) { unsigned int base; if (up_flag) return; if (value < 10) { /* decimal input of code, while Alt depressed */ base = 10; } else { /* hexadecimal input of code, while AltGr depressed */ value -= 10; base = 16; } if (!npadch_active) { npadch_value = 0; npadch_active = true; } npadch_value = npadch_value * base + value; } static void k_lock(struct vc_data *vc, unsigned char value, char up_flag) { if (up_flag || rep) return; chg_vc_kbd_lock(kbd, value); } static void k_slock(struct vc_data *vc, unsigned char value, char up_flag) { k_shift(vc, value, up_flag); if (up_flag || rep) return; chg_vc_kbd_slock(kbd, value); /* try to make Alt, oops, AltGr and such work */ if (!key_maps[kbd->lockstate ^ kbd->slockstate]) { kbd->slockstate = 0; chg_vc_kbd_slock(kbd, value); } } /* by default, 300ms interval for combination release */ static unsigned brl_timeout = 300; MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)"); module_param(brl_timeout, uint, 0644); static unsigned brl_nbchords = 1; MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)"); module_param(brl_nbchords, uint, 0644); static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag) { static unsigned long chords; static unsigned committed; if (!brl_nbchords) k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag); else { committed |= pattern; chords++; if (chords == brl_nbchords) { k_unicode(vc, BRL_UC_ROW | committed, up_flag); chords = 0; committed = 0; } } } static void k_brl(struct 
vc_data *vc, unsigned char value, char up_flag) { static unsigned pressed, committing; static unsigned long releasestart; if (kbd->kbdmode != VC_UNICODE) { if (!up_flag) pr_warn("keyboard mode must be unicode for braille patterns\n"); return; } if (!value) { k_unicode(vc, BRL_UC_ROW, up_flag); return; } if (value > 8) return; if (!up_flag) { pressed |= BIT(value - 1); if (!brl_timeout) committing = pressed; } else if (brl_timeout) { if (!committing || time_after(jiffies, releasestart + msecs_to_jiffies(brl_timeout))) { committing = pressed; releasestart = jiffies; } pressed &= ~BIT(value - 1); if (!pressed && committing) { k_brlcommit(vc, committing, 0); committing = 0; } } else { if (committing) { k_brlcommit(vc, committing, 0); committing = 0; } pressed &= ~BIT(value - 1); } } #if IS_ENABLED(CONFIG_INPUT_LEDS) && IS_ENABLED(CONFIG_LEDS_TRIGGERS) struct kbd_led_trigger { struct led_trigger trigger; unsigned int mask; }; static int kbd_led_trigger_activate(struct led_classdev *cdev) { struct kbd_led_trigger *trigger = container_of(cdev->trigger, struct kbd_led_trigger, trigger); tasklet_disable(&keyboard_tasklet); if (ledstate != -1U) led_set_brightness(cdev, ledstate & trigger->mask ? LED_FULL : LED_OFF); tasklet_enable(&keyboard_tasklet); return 0; } #define KBD_LED_TRIGGER(_led_bit, _name) { \ .trigger = { \ .name = _name, \ .activate = kbd_led_trigger_activate, \ }, \ .mask = BIT(_led_bit), \ } #define KBD_LOCKSTATE_TRIGGER(_led_bit, _name) \ KBD_LED_TRIGGER((_led_bit) + 8, _name) static struct kbd_led_trigger kbd_led_triggers[] = { KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"), KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"), KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"), KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"), KBD_LOCKSTATE_TRIGGER(VC_SHIFTLOCK, "kbd-shiftlock"), KBD_LOCKSTATE_TRIGGER(VC_ALTGRLOCK, "kbd-altgrlock"), KBD_LOCKSTATE_TRIGGER(VC_CTRLLOCK, "kbd-ctrllock"), KBD_LOCKSTATE_TRIGGER(VC_ALTLOCK, "kbd-altlock"), KBD_LOCKSTATE_TRIGGER(VC_SHIFTLLOCK, "kbd-shiftllock"), KBD_LOCKSTATE_TRIGGER(VC_SHIFTRLOCK, "kbd-shiftrlock"), KBD_LOCKSTATE_TRIGGER(VC_CTRLLLOCK, "kbd-ctrlllock"), KBD_LOCKSTATE_TRIGGER(VC_CTRLRLOCK, "kbd-ctrlrlock"), }; static void kbd_propagate_led_state(unsigned int old_state, unsigned int new_state) { struct kbd_led_trigger *trigger; unsigned int changed = old_state ^ new_state; int i; for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); i++) { trigger = &kbd_led_triggers[i]; if (changed & trigger->mask) led_trigger_event(&trigger->trigger, new_state & trigger->mask ? 
LED_FULL : LED_OFF); } } static int kbd_update_leds_helper(struct input_handle *handle, void *data) { unsigned int led_state = *(unsigned int *)data; if (test_bit(EV_LED, handle->dev->evbit)) kbd_propagate_led_state(~led_state, led_state); return 0; } static void kbd_init_leds(void) { int error; int i; for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); i++) { error = led_trigger_register(&kbd_led_triggers[i].trigger); if (error) pr_err("error %d while registering trigger %s\n", error, kbd_led_triggers[i].trigger.name); } } #else static int kbd_update_leds_helper(struct input_handle *handle, void *data) { unsigned int leds = *(unsigned int *)data; if (test_bit(EV_LED, handle->dev->evbit)) { input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & BIT(0))); input_inject_event(handle, EV_LED, LED_NUML, !!(leds & BIT(1))); input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & BIT(2))); input_inject_event(handle, EV_SYN, SYN_REPORT, 0); } return 0; } static void kbd_propagate_led_state(unsigned int old_state, unsigned int new_state) { input_handler_for_each_handle(&kbd_handler, &new_state, kbd_update_leds_helper); } static void kbd_init_leds(void) { } #endif /* * The leds display either (i) the status of NumLock, CapsLock, ScrollLock, * or (ii) whatever pattern of lights people want to show using KDSETLED, * or (iii) specified bits of specified words in kernel memory. */ static unsigned char getledstate(void) { return ledstate & 0xff; } void setledstate(struct kbd_struct *kb, unsigned int led) { unsigned long flags; spin_lock_irqsave(&led_lock, flags); if (!(led & ~7)) { ledioctl = led; kb->ledmode = LED_SHOW_IOCTL; } else kb->ledmode = LED_SHOW_FLAGS; set_leds(); spin_unlock_irqrestore(&led_lock, flags); } static inline unsigned char getleds(void) { struct kbd_struct *kb = kbd_table + fg_console; if (kb->ledmode == LED_SHOW_IOCTL) return ledioctl; return kb->ledflagstate; } /** * vt_get_leds - helper for braille console * @console: console to read * @flag: flag we want to check * * Check the status of a keyboard led flag and report it back */ int vt_get_leds(unsigned int console, int flag) { struct kbd_struct *kb = &kbd_table[console]; int ret; unsigned long flags; spin_lock_irqsave(&led_lock, flags); ret = vc_kbd_led(kb, flag); spin_unlock_irqrestore(&led_lock, flags); return ret; } EXPORT_SYMBOL_GPL(vt_get_leds); /** * vt_set_led_state - set LED state of a console * @console: console to set * @leds: LED bits * * Set the LEDs on a console. This is a wrapper for the VT layer * so that we can keep kbd knowledge internal */ void vt_set_led_state(unsigned int console, int leds) { struct kbd_struct *kb = &kbd_table[console]; setledstate(kb, leds); } /** * vt_kbd_con_start - Keyboard side of console start * @console: console * * Handle console start. This is a wrapper for the VT layer * so that we can keep kbd knowledge internal * * FIXME: We eventually need to hold the kbd lock here to protect * the LED updating. We can't do it yet because fn_hold calls stop_tty * and start_tty under the kbd_event_lock, while normal tty paths * don't hold the lock. We probably need to split out an LED lock * but not during an -rc release! */ void vt_kbd_con_start(unsigned int console) { struct kbd_struct *kb = &kbd_table[console]; unsigned long flags; spin_lock_irqsave(&led_lock, flags); clr_vc_kbd_led(kb, VC_SCROLLOCK); set_leds(); spin_unlock_irqrestore(&led_lock, flags); } /** * vt_kbd_con_stop - Keyboard side of console stop * @console: console * * Handle console stop. 
This is a wrapper for the VT layer * so that we can keep kbd knowledge internal */ void vt_kbd_con_stop(unsigned int console) { struct kbd_struct *kb = &kbd_table[console]; unsigned long flags; spin_lock_irqsave(&led_lock, flags); set_vc_kbd_led(kb, VC_SCROLLOCK); set_leds(); spin_unlock_irqrestore(&led_lock, flags); } /* * This is the tasklet that updates LED state of LEDs using standard * keyboard triggers. The reason we use tasklet is that we need to * handle the scenario when keyboard handler is not registered yet * but we already getting updates from the VT to update led state. */ static void kbd_bh(struct tasklet_struct *unused) { unsigned int leds; unsigned long flags; spin_lock_irqsave(&led_lock, flags); leds = getleds(); leds |= (unsigned int)kbd->lockstate << 8; spin_unlock_irqrestore(&led_lock, flags); if (vt_switch) { ledstate = ~leds; vt_switch = false; } if (leds != ledstate) { kbd_propagate_led_state(ledstate, leds); ledstate = leds; } } #if defined(CONFIG_X86) || defined(CONFIG_ALPHA) ||\ defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\ defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\ (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC)) static inline bool kbd_is_hw_raw(const struct input_dev *dev) { if (!test_bit(EV_MSC, dev->evbit) || !test_bit(MSC_RAW, dev->mscbit)) return false; return dev->id.bustype == BUS_I8042 && dev->id.vendor == 0x0001 && dev->id.product == 0x0001; } static const unsigned short x86_keycodes[256] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,118, 86, 87, 88,115,120,119,121,112,123, 92, 284,285,309, 0,312, 91,327,328,329,331,333,335,336,337,338,339, 367,288,302,304,350, 89,334,326,267,126,268,269,125,347,348,349, 360,261,262,263,268,376,100,101,321,316,373,286,289,102,351,355, 103,104,105,275,287,279,258,106,274,107,294,364,358,363,362,361, 291,108,381,281,290,272,292,305,280, 99,112,257,306,359,113,114, 264,117,271,374,379,265,266, 93, 94, 95, 85,259,375,260, 90,116, 377,109,111,277,278,282,283,295,296,297,299,300,301,293,303,307, 308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330, 332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 }; #ifdef CONFIG_SPARC static int sparc_l1_a_state; extern void sun_do_break(void); #endif static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char up_flag) { int code; switch (keycode) { case KEY_PAUSE: put_queue(vc, 0xe1); put_queue(vc, 0x1d | up_flag); put_queue(vc, 0x45 | up_flag); break; case KEY_HANGEUL: if (!up_flag) put_queue(vc, 0xf2); break; case KEY_HANJA: if (!up_flag) put_queue(vc, 0xf1); break; case KEY_SYSRQ: /* * Real AT keyboards (that's what we're trying * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when * pressing PrtSc/SysRq alone, but simply 0x54 * when pressing Alt+PrtSc/SysRq. 
		 */
		if (test_bit(KEY_LEFTALT, key_down) ||
		    test_bit(KEY_RIGHTALT, key_down)) {
			put_queue(vc, 0x54 | up_flag);
		} else {
			put_queue(vc, 0xe0);
			put_queue(vc, 0x2a | up_flag);
			put_queue(vc, 0xe0);
			put_queue(vc, 0x37 | up_flag);
		}
		break;

	default:
		if (keycode > 255)
			return -1;

		code = x86_keycodes[keycode];
		if (!code)
			return -1;

		if (code & 0x100)
			put_queue(vc, 0xe0);
		put_queue(vc, (code & 0x7f) | up_flag);

		break;
	}

	return 0;
}

#else

static inline bool kbd_is_hw_raw(const struct input_dev *dev)
{
	return false;
}

static int emulate_raw(struct vc_data *vc, unsigned int keycode,
		       unsigned char up_flag)
{
	if (keycode > 127)
		return -1;

	put_queue(vc, keycode | up_flag);
	return 0;
}
#endif

static void kbd_rawcode(unsigned char data)
{
	struct vc_data *vc = vc_cons[fg_console].d;

	kbd = &kbd_table[vc->vc_num];
	if (kbd->kbdmode == VC_RAW)
		put_queue(vc, data);
}

static void kbd_keycode(unsigned int keycode, int down, bool hw_raw)
{
	struct vc_data *vc = vc_cons[fg_console].d;
	unsigned short keysym, *key_map;
	unsigned char type;
	bool raw_mode;
	struct tty_struct *tty;
	int shift_final;
	struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
	int rc;

	tty = vc->port.tty;

	if (tty && (!tty->driver_data)) {
		/* No driver data? Strange. Okay we fix it then. */
		tty->driver_data = vc;
	}

	kbd = &kbd_table[vc->vc_num];

#ifdef CONFIG_SPARC
	if (keycode == KEY_STOP)
		sparc_l1_a_state = down;
#endif

	rep = (down == 2);

	raw_mode = (kbd->kbdmode == VC_RAW);
	if (raw_mode && !hw_raw)
		if (emulate_raw(vc, keycode, !down << 7))
			if (keycode < BTN_MISC && printk_ratelimit())
				pr_warn("can't emulate rawmode for keycode %d\n",
					keycode);

#ifdef CONFIG_SPARC
	if (keycode == KEY_A && sparc_l1_a_state) {
		sparc_l1_a_state = false;
		sun_do_break();
	}
#endif

	if (kbd->kbdmode == VC_MEDIUMRAW) {
		/*
		 * This is extended medium raw mode, with keys above 127
		 * encoded as 0, high 7 bits, low 7 bits, with the 0 bearing
		 * the 'up' flag if needed. 0 is reserved, so this shouldn't
		 * interfere with anything else. The two bytes after 0 will
		 * always have the up flag set so as not to interfere with
		 * older applications. This allows for 16384 different
		 * keycodes, which should be enough.
		 */
		if (keycode < 128) {
			put_queue(vc, keycode | (!down << 7));
		} else {
			put_queue(vc, !down << 7);
			put_queue(vc, (keycode >> 7) | BIT(7));
			put_queue(vc, keycode | BIT(7));
		}
		raw_mode = true;
	}

	assign_bit(keycode, key_down, down);

	if (rep &&
	    (!vc_kbd_mode(kbd, VC_REPEAT) ||
	     (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
		/*
		 * Don't repeat a key if the input buffers are not empty and
		 * the characters aren't echoed locally. This makes key
		 * repeat usable with slow applications and under heavy loads.
*/ return; } param.shift = shift_final = (shift_state | kbd->slockstate) ^ kbd->lockstate; param.ledstate = kbd->ledflagstate; key_map = key_maps[shift_final]; rc = atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYCODE, &param); if (rc == NOTIFY_STOP || !key_map) { atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNBOUND_KEYCODE, &param); do_compute_shiftstate(); kbd->slockstate = 0; return; } if (keycode < NR_KEYS) keysym = key_map[keycode]; else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8) keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1)); else return; type = KTYP(keysym); if (type < 0xf0) { param.value = keysym; rc = atomic_notifier_call_chain(&keyboard_notifier_list, KBD_UNICODE, &param); if (rc != NOTIFY_STOP) if (down && !raw_mode) k_unicode(vc, keysym, !down); return; } type -= 0xf0; if (type == KT_LETTER) { type = KT_LATIN; if (vc_kbd_led(kbd, VC_CAPSLOCK)) { key_map = key_maps[shift_final ^ BIT(KG_SHIFT)]; if (key_map) keysym = key_map[keycode]; } } param.value = keysym; rc = atomic_notifier_call_chain(&keyboard_notifier_list, KBD_KEYSYM, &param); if (rc == NOTIFY_STOP) return; if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT) return; (*k_handler[type])(vc, keysym & 0xff, !down); param.ledstate = kbd->ledflagstate; atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param); if (type != KT_SLOCK) kbd->slockstate = 0; } static void kbd_event(struct input_handle *handle, unsigned int event_type, unsigned int event_code, int value) { /* We are called with interrupts disabled, just take the lock */ spin_lock(&kbd_event_lock); if (event_type == EV_MSC && event_code == MSC_RAW && kbd_is_hw_raw(handle->dev)) kbd_rawcode(value); if (event_type == EV_KEY && event_code <= KEY_MAX) kbd_keycode(event_code, value, kbd_is_hw_raw(handle->dev)); spin_unlock(&kbd_event_lock); tasklet_schedule(&keyboard_tasklet); do_poke_blanked_console = 1; schedule_console_callback(); } static bool kbd_match(struct input_handler *handler, struct input_dev *dev) { if (test_bit(EV_SND, dev->evbit)) return true; if (test_bit(EV_KEY, dev->evbit)) { if (find_next_bit(dev->keybit, BTN_MISC, KEY_RESERVED) < BTN_MISC) return true; if (find_next_bit(dev->keybit, KEY_BRL_DOT10 + 1, KEY_BRL_DOT1) <= KEY_BRL_DOT10) return true; } return false; } /* * When a keyboard (or other input device) is found, the kbd_connect * function is called. The function then looks at the device, and if it * likes it, it can open it and get events from it. In this (kbd_connect) * function, we should decide which VT to bind that keyboard to initially. */ static int kbd_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { struct input_handle *handle; int error; handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); if (!handle) return -ENOMEM; handle->dev = dev; handle->handler = handler; handle->name = "kbd"; error = input_register_handle(handle); if (error) goto err_free_handle; error = input_open_device(handle); if (error) goto err_unregister_handle; return 0; err_unregister_handle: input_unregister_handle(handle); err_free_handle: kfree(handle); return error; } static void kbd_disconnect(struct input_handle *handle) { input_close_device(handle); input_unregister_handle(handle); kfree(handle); } /* * Start keyboard handler on the new keyboard by refreshing LED state to * match the rest of the system. 
*/ static void kbd_start(struct input_handle *handle) { tasklet_disable(&keyboard_tasklet); if (ledstate != -1U) kbd_update_leds_helper(handle, &ledstate); tasklet_enable(&keyboard_tasklet); } static const struct input_device_id kbd_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT_MASK(EV_KEY) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT, .evbit = { BIT_MASK(EV_SND) }, }, { }, /* Terminating entry */ }; MODULE_DEVICE_TABLE(input, kbd_ids); static struct input_handler kbd_handler = { .event = kbd_event, .match = kbd_match, .connect = kbd_connect, .disconnect = kbd_disconnect, .start = kbd_start, .name = "kbd", .id_table = kbd_ids, }; int __init kbd_init(void) { int i; int error; for (i = 0; i < MAX_NR_CONSOLES; i++) { kbd_table[i].ledflagstate = kbd_defleds(); kbd_table[i].default_ledflagstate = kbd_defleds(); kbd_table[i].ledmode = LED_SHOW_FLAGS; kbd_table[i].lockstate = KBD_DEFLOCK; kbd_table[i].slockstate = 0; kbd_table[i].modeflags = KBD_DEFMODE; kbd_table[i].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE; } kbd_init_leds(); error = input_register_handler(&kbd_handler); if (error) return error; tasklet_enable(&keyboard_tasklet); tasklet_schedule(&keyboard_tasklet); return 0; } /* Ioctl support code */ /** * vt_do_diacrit - diacritical table updates * @cmd: ioctl request * @udp: pointer to user data for ioctl * @perm: permissions check computed by caller * * Update the diacritical tables atomically and safely. Lock them * against simultaneous keypresses */ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm) { unsigned long flags; int asize; int ret = 0; switch (cmd) { case KDGKBDIACR: { struct kbdiacrs __user *a = udp; struct kbdiacr *dia; int i; dia = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacr), GFP_KERNEL); if (!dia) return -ENOMEM; /* Lock the diacriticals table, make a copy and then copy it after we unlock */ spin_lock_irqsave(&kbd_event_lock, flags); asize = accent_table_size; for (i = 0; i < asize; i++) { dia[i].diacr = conv_uni_to_8bit( accent_table[i].diacr); dia[i].base = conv_uni_to_8bit( accent_table[i].base); dia[i].result = conv_uni_to_8bit( accent_table[i].result); } spin_unlock_irqrestore(&kbd_event_lock, flags); if (put_user(asize, &a->kb_cnt)) ret = -EFAULT; else if (copy_to_user(a->kbdiacr, dia, asize * sizeof(struct kbdiacr))) ret = -EFAULT; kfree(dia); return ret; } case KDGKBDIACRUC: { struct kbdiacrsuc __user *a = udp; void *buf; buf = kmalloc_array(MAX_DIACR, sizeof(struct kbdiacruc), GFP_KERNEL); if (buf == NULL) return -ENOMEM; /* Lock the diacriticals table, make a copy and then copy it after we unlock */ spin_lock_irqsave(&kbd_event_lock, flags); asize = accent_table_size; memcpy(buf, accent_table, asize * sizeof(struct kbdiacruc)); spin_unlock_irqrestore(&kbd_event_lock, flags); if (put_user(asize, &a->kb_cnt)) ret = -EFAULT; else if (copy_to_user(a->kbdiacruc, buf, asize*sizeof(struct kbdiacruc))) ret = -EFAULT; kfree(buf); return ret; } case KDSKBDIACR: { struct kbdiacrs __user *a = udp; struct kbdiacr *dia = NULL; unsigned int ct; int i; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; if (ct) { dia = memdup_array_user(a->kbdiacr, ct, sizeof(struct kbdiacr)); if (IS_ERR(dia)) return PTR_ERR(dia); } spin_lock_irqsave(&kbd_event_lock, flags); accent_table_size = ct; for (i = 0; i < ct; i++) { accent_table[i].diacr = conv_8bit_to_uni(dia[i].diacr); accent_table[i].base = conv_8bit_to_uni(dia[i].base); accent_table[i].result = conv_8bit_to_uni(dia[i].result); } 
spin_unlock_irqrestore(&kbd_event_lock, flags); kfree(dia); return 0; } case KDSKBDIACRUC: { struct kbdiacrsuc __user *a = udp; unsigned int ct; void *buf = NULL; if (!perm) return -EPERM; if (get_user(ct, &a->kb_cnt)) return -EFAULT; if (ct >= MAX_DIACR) return -EINVAL; if (ct) { buf = memdup_array_user(a->kbdiacruc, ct, sizeof(struct kbdiacruc)); if (IS_ERR(buf)) return PTR_ERR(buf); } spin_lock_irqsave(&kbd_event_lock, flags); if (ct) memcpy(accent_table, buf, ct * sizeof(struct kbdiacruc)); accent_table_size = ct; spin_unlock_irqrestore(&kbd_event_lock, flags); kfree(buf); return 0; } } return ret; } /** * vt_do_kdskbmode - set keyboard mode ioctl * @console: the console to use * @arg: the requested mode * * Update the keyboard mode bits while holding the correct locks. * Return 0 for success or an error code. */ int vt_do_kdskbmode(unsigned int console, unsigned int arg) { struct kbd_struct *kb = &kbd_table[console]; int ret = 0; unsigned long flags; spin_lock_irqsave(&kbd_event_lock, flags); switch(arg) { case K_RAW: kb->kbdmode = VC_RAW; break; case K_MEDIUMRAW: kb->kbdmode = VC_MEDIUMRAW; break; case K_XLATE: kb->kbdmode = VC_XLATE; do_compute_shiftstate(); break; case K_UNICODE: kb->kbdmode = VC_UNICODE; do_compute_shiftstate(); break; case K_OFF: kb->kbdmode = VC_OFF; break; default: ret = -EINVAL; } spin_unlock_irqrestore(&kbd_event_lock, flags); return ret; } /** * vt_do_kdskbmeta - set keyboard meta state * @console: the console to use * @arg: the requested meta state * * Update the keyboard meta bits while holding the correct locks. * Return 0 for success or an error code. */ int vt_do_kdskbmeta(unsigned int console, unsigned int arg) { struct kbd_struct *kb = &kbd_table[console]; int ret = 0; unsigned long flags; spin_lock_irqsave(&kbd_event_lock, flags); switch(arg) { case K_METABIT: clr_vc_kbd_mode(kb, VC_META); break; case K_ESCPREFIX: set_vc_kbd_mode(kb, VC_META); break; default: ret = -EINVAL; } spin_unlock_irqrestore(&kbd_event_lock, flags); return ret; } int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm) { struct kbkeycode tmp; int kc = 0; if (copy_from_user(&tmp, user_kbkc, sizeof(struct kbkeycode))) return -EFAULT; switch (cmd) { case KDGETKEYCODE: kc = getkeycode(tmp.scancode); if (kc >= 0) kc = put_user(kc, &user_kbkc->keycode); break; case KDSETKEYCODE: if (!perm) return -EPERM; kc = setkeycode(tmp.scancode, tmp.keycode); break; } return kc; } static unsigned short vt_kdgkbent(unsigned char kbdmode, unsigned char idx, unsigned char map) { unsigned short *key_map, val; unsigned long flags; /* Ensure another thread doesn't free it under us */ spin_lock_irqsave(&kbd_event_lock, flags); key_map = key_maps[map]; if (key_map) { val = U(key_map[idx]); if (kbdmode != VC_UNICODE && KTYP(val) >= NR_TYPES) val = K_HOLE; } else val = idx ? 
K_HOLE : K_NOSUCHMAP; spin_unlock_irqrestore(&kbd_event_lock, flags); return val; } static int vt_kdskbent(unsigned char kbdmode, unsigned char idx, unsigned char map, unsigned short val) { unsigned long flags; unsigned short *key_map, *new_map, oldval; if (!idx && val == K_NOSUCHMAP) { spin_lock_irqsave(&kbd_event_lock, flags); /* deallocate map */ key_map = key_maps[map]; if (map && key_map) { key_maps[map] = NULL; if (key_map[0] == U(K_ALLOCATED)) { kfree(key_map); keymap_count--; } } spin_unlock_irqrestore(&kbd_event_lock, flags); return 0; } if (KTYP(val) < NR_TYPES) { if (KVAL(val) > max_vals[KTYP(val)]) return -EINVAL; } else if (kbdmode != VC_UNICODE) return -EINVAL; /* ++Geert: non-PC keyboards may generate keycode zero */ #if !defined(__mc68000__) && !defined(__powerpc__) /* assignment to entry 0 only tests validity of args */ if (!idx) return 0; #endif new_map = kmalloc(sizeof(plain_map), GFP_KERNEL); if (!new_map) return -ENOMEM; spin_lock_irqsave(&kbd_event_lock, flags); key_map = key_maps[map]; if (key_map == NULL) { int j; if (keymap_count >= MAX_NR_OF_USER_KEYMAPS && !capable(CAP_SYS_RESOURCE)) { spin_unlock_irqrestore(&kbd_event_lock, flags); kfree(new_map); return -EPERM; } key_maps[map] = new_map; key_map = new_map; key_map[0] = U(K_ALLOCATED); for (j = 1; j < NR_KEYS; j++) key_map[j] = U(K_HOLE); keymap_count++; } else kfree(new_map); oldval = U(key_map[idx]); if (val == oldval) goto out; /* Attention Key */ if ((oldval == K_SAK || val == K_SAK) && !capable(CAP_SYS_ADMIN)) { spin_unlock_irqrestore(&kbd_event_lock, flags); return -EPERM; } key_map[idx] = U(val); if (!map && (KTYP(oldval) == KT_SHIFT || KTYP(val) == KT_SHIFT)) do_compute_shiftstate(); out: spin_unlock_irqrestore(&kbd_event_lock, flags); return 0; } int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, unsigned int console) { struct kbd_struct *kb = &kbd_table[console]; struct kbentry kbe; if (copy_from_user(&kbe, user_kbe, sizeof(struct kbentry))) return -EFAULT; switch (cmd) { case KDGKBENT: return put_user(vt_kdgkbent(kb->kbdmode, kbe.kb_index, kbe.kb_table), &user_kbe->kb_value); case KDSKBENT: if (!perm || !capable(CAP_SYS_TTY_CONFIG)) return -EPERM; return vt_kdskbent(kb->kbdmode, kbe.kb_index, kbe.kb_table, kbe.kb_value); } return 0; } static char *vt_kdskbsent(char *kbs, unsigned char cur) { static DECLARE_BITMAP(is_kmalloc, MAX_NR_FUNC); char *cur_f = func_table[cur]; if (cur_f && strlen(cur_f) >= strlen(kbs)) { strcpy(cur_f, kbs); return kbs; } func_table[cur] = kbs; return __test_and_set_bit(cur, is_kmalloc) ? cur_f : NULL; } int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm) { unsigned char kb_func; unsigned long flags; char *kbs; int ret; if (get_user(kb_func, &user_kdgkb->kb_func)) return -EFAULT; kb_func = array_index_nospec(kb_func, MAX_NR_FUNC); switch (cmd) { case KDGKBSENT: { /* size should have been a struct member */ ssize_t len = sizeof(user_kdgkb->kb_string); kbs = kmalloc(len, GFP_KERNEL); if (!kbs) return -ENOMEM; spin_lock_irqsave(&func_buf_lock, flags); len = strscpy(kbs, func_table[kb_func] ? : "", len); spin_unlock_irqrestore(&func_buf_lock, flags); if (len < 0) { ret = -ENOSPC; break; } ret = copy_to_user(user_kdgkb->kb_string, kbs, len + 1) ? 
-EFAULT : 0; break; } case KDSKBSENT: if (!perm || !capable(CAP_SYS_TTY_CONFIG)) return -EPERM; kbs = strndup_user(user_kdgkb->kb_string, sizeof(user_kdgkb->kb_string)); if (IS_ERR(kbs)) return PTR_ERR(kbs); spin_lock_irqsave(&func_buf_lock, flags); kbs = vt_kdskbsent(kbs, kb_func); spin_unlock_irqrestore(&func_buf_lock, flags); ret = 0; break; } kfree(kbs); return ret; } int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm) { struct kbd_struct *kb = &kbd_table[console]; unsigned long flags; unsigned char ucval; switch(cmd) { /* the ioctls below read/set the flags usually shown in the leds */ /* don't use them - they will go away without warning */ case KDGKBLED: spin_lock_irqsave(&kbd_event_lock, flags); ucval = kb->ledflagstate | (kb->default_ledflagstate << 4); spin_unlock_irqrestore(&kbd_event_lock, flags); return put_user(ucval, (char __user *)arg); case KDSKBLED: if (!perm) return -EPERM; if (arg & ~0x77) return -EINVAL; spin_lock_irqsave(&led_lock, flags); kb->ledflagstate = (arg & 7); kb->default_ledflagstate = ((arg >> 4) & 7); set_leds(); spin_unlock_irqrestore(&led_lock, flags); return 0; /* the ioctls below only set the lights, not the functions */ /* for those, see KDGKBLED and KDSKBLED above */ case KDGETLED: ucval = getledstate(); return put_user(ucval, (char __user *)arg); case KDSETLED: if (!perm) return -EPERM; setledstate(kb, arg); return 0; } return -ENOIOCTLCMD; } int vt_do_kdgkbmode(unsigned int console) { struct kbd_struct *kb = &kbd_table[console]; /* This is a spot read so needs no locking */ switch (kb->kbdmode) { case VC_RAW: return K_RAW; case VC_MEDIUMRAW: return K_MEDIUMRAW; case VC_UNICODE: return K_UNICODE; case VC_OFF: return K_OFF; default: return K_XLATE; } } /** * vt_do_kdgkbmeta - report meta status * @console: console to report * * Report the meta flag status of this console */ int vt_do_kdgkbmeta(unsigned int console) { struct kbd_struct *kb = &kbd_table[console]; /* Again a spot read so no locking */ return vc_kbd_mode(kb, VC_META) ? K_ESCPREFIX : K_METABIT; } /** * vt_reset_unicode - reset the unicode status * @console: console being reset * * Restore the unicode console state to its default */ void vt_reset_unicode(unsigned int console) { unsigned long flags; spin_lock_irqsave(&kbd_event_lock, flags); kbd_table[console].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE; spin_unlock_irqrestore(&kbd_event_lock, flags); } /** * vt_get_shift_state - shift bit state * * Report the shift bits from the keyboard state. We have to export * this to support some oddities in the vt layer. 
 */
int vt_get_shift_state(void)
{
	/* Don't lock as this is a transient report */
	return shift_state;
}

/**
 * vt_reset_keyboard - reset keyboard state
 * @console: console to reset
 *
 * Reset the keyboard bits for a console as part of a general console
 * reset event
 */
void vt_reset_keyboard(unsigned int console)
{
	struct kbd_struct *kb = &kbd_table[console];
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	set_vc_kbd_mode(kb, VC_REPEAT);
	clr_vc_kbd_mode(kb, VC_CKMODE);
	clr_vc_kbd_mode(kb, VC_APPLIC);
	clr_vc_kbd_mode(kb, VC_CRLF);
	kb->lockstate = 0;
	kb->slockstate = 0;

	spin_lock(&led_lock);
	kb->ledmode = LED_SHOW_FLAGS;
	kb->ledflagstate = kb->default_ledflagstate;
	spin_unlock(&led_lock);

	/*
	 * Do not call set_leds() here: it would cause an endless tasklet
	 * loop when the keyboard hasn't been initialized yet.
	 */

	spin_unlock_irqrestore(&kbd_event_lock, flags);
}

/**
 * vt_get_kbd_mode_bit - read keyboard status bits
 * @console: console to read from
 * @bit: mode bit to read
 *
 * Report back a vt mode bit. We do this without locking so the
 * caller must be sure that there are no synchronization needs
 */
int vt_get_kbd_mode_bit(unsigned int console, int bit)
{
	struct kbd_struct *kb = &kbd_table[console];

	return vc_kbd_mode(kb, bit);
}

/**
 * vt_set_kbd_mode_bit - set a keyboard status bit
 * @console: console to write to
 * @bit: mode bit to set
 *
 * Set a vt mode bit. The bit is updated under kbd_event_lock.
 */
void vt_set_kbd_mode_bit(unsigned int console, int bit)
{
	struct kbd_struct *kb = &kbd_table[console];
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	set_vc_kbd_mode(kb, bit);
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}

/**
 * vt_clr_kbd_mode_bit - clear a keyboard status bit
 * @console: console to write to
 * @bit: mode bit to clear
 *
 * Clear a vt mode bit. The bit is updated under kbd_event_lock.
 */
void vt_clr_kbd_mode_bit(unsigned int console, int bit)
{
	struct kbd_struct *kb = &kbd_table[console];
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	clr_vc_kbd_mode(kb, bit);
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
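
/*
 * A user-space sketch (not part of the driver) exercising the KDGKBENT
 * path above: it reads the plain-map (kb_table 0) binding of keycode 30,
 * 'a' on a PC keyboard. Error handling is abbreviated; /dev/tty as the
 * VT handle is an assumption.
 */
#ifdef KBD_DOC_SKETCH_USER
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kd.h>

int main(void)
{
	struct kbentry ke = { .kb_table = 0, .kb_index = 30 };
	int fd = open("/dev/tty", O_RDONLY);

	if (fd < 0 || ioctl(fd, KDGKBENT, &ke))
		return 1;
	printf("keysym %#06x\n", ke.kb_value);
	return 0;
}
#endif /* KBD_DOC_SKETCH_USER */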
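/*
 * Complementary sketch for the extended K_MEDIUMRAW stream documented in
 * kbd_keycode() above: keycodes above 127 arrive as a 0x00/0x80 flag
 * byte followed by two 0x80-tagged 7-bit halves. mediumraw_decode() is a
 * hypothetical user-space helper, not kernel code.
 */
#ifdef KBD_DOC_SKETCH_USER
static int mediumraw_decode(const unsigned char *buf, unsigned int *keycode,
			    int *released)
{
	*released = buf[0] >> 7;
	if (buf[0] & 0x7f) {		/* one-byte form: keycode < 128 */
		*keycode = buf[0] & 0x7f;
		return 1;
	}
	/* three-byte form: high 7 bits, then low 7 bits */
	*keycode = ((buf[1] & 0x7f) << 7) | (buf[2] & 0x7f);
	return 3;
}
#endif /* KBD_DOC_SKETCH_USER */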
// SPDX-License-Identifier: GPL-2.0-only
/*
 * slip.c	This module implements the SLIP protocol for kernel-based
 *		devices like TTY. It interfaces between a raw TTY, and the
 *		kernel's INET protocol layers.
 *
 * Version:	@(#)slip.c	0.8.3	12/24/94
 *
 * Authors:	Laurence Culhane, <loz@holmes.demon.co.uk>
 *		Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
 *
 * Fixes:
 *		Alan Cox	:	Sanity checks and avoid tx overruns.
 *					Has a new sl->mtu field.
 *		Alan Cox	:	Found cause of overrun. ifconfig sl0
 *					mtu upwards. Driver now spots this
 *					and grows/shrinks its buffers(hack!).
 *					Memory leak if you run out of memory
 *					setting up a slip driver fixed.
 *		Matt Dillon	:	Printable slip (borrowed from NET2E)
 *		Pauline Middelink :	Slip driver fixes.
* Alan Cox : Honours the old SL_COMPRESSED flag * Alan Cox : KISS AX.25 and AXUI IP support * Michael Riepe : Automatic CSLIP recognition added * Charles Hedrick : CSLIP header length problem fix. * Alan Cox : Corrected non-IP cases of the above. * Alan Cox : Now uses hardware type as per FvK. * Alan Cox : Default to 192.168.0.0 (RFC 1597) * A.N.Kuznetsov : dev_tint() recursion fix. * Dmitry Gorodchanin : SLIP memory leaks * Dmitry Gorodchanin : Code cleanup. Reduce tty driver * buffering from 4096 to 256 bytes. * Improving SLIP response time. * CONFIG_SLIP_MODE_SLIP6. * ifconfig sl? up & down now works * correctly. * Modularization. * Alan Cox : Oops - fix AX.25 buffer lengths * Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP * statistics. Include CSLIP code only * if it really needed. * Alan Cox : Free slhc buffers in the right place. * Alan Cox : Allow for digipeated IP over AX.25 * Matti Aarnio : Dynamic SLIP devices, with ideas taken * from Jim Freeman's <jfree@caldera.com> * dynamic PPP devices. We do NOT kfree() * device entries, just reg./unreg. them * as they are needed. We kfree() them * at module cleanup. * With MODULE-loading ``insmod'', user * can issue parameter: slip_maxdev=1024 * (Or how much he/she wants.. Default * is 256) * Stanislav Voronyi : Slip line checking, with ideas taken * from multislip BSDI driver which was * written by Igor Chechik, RELCOM Corp. * Only algorithms have been ported to * Linux SLIP driver. * Vitaly E. Lavrov : Sane behaviour on tty hangup. * Alexey Kuznetsov : Cleanup interfaces to tty & netdevice * modules. */ #define SL_CHECK_TRANSMIT #include <linux/compat.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/if_arp.h> #include <linux/if_slip.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "slip.h" #ifdef CONFIG_INET #include <linux/ip.h> #include <linux/tcp.h> #include <net/slhc_vj.h> #endif #define SLIP_VERSION "0.8.4-NET3.019-NEWTTY" static struct net_device **slip_devs; static int slip_maxdev = SL_NRUNIT; module_param(slip_maxdev, int, 0); MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices"); static int slip_esc(unsigned char *p, unsigned char *d, int len); static void slip_unesc(struct slip *sl, unsigned char c); #ifdef CONFIG_SLIP_MODE_SLIP6 static int slip_esc6(unsigned char *p, unsigned char *d, int len); static void slip_unesc6(struct slip *sl, unsigned char c); #endif #ifdef CONFIG_SLIP_SMART static void sl_keepalive(struct timer_list *t); static void sl_outfill(struct timer_list *t); static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); #endif /******************************** * Buffer administration routines: * sl_alloc_bufs() * sl_free_bufs() * sl_realloc_bufs() * * NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because * sl_realloc_bufs provides strong atomicity and reallocation * on actively running device. *********************************/ /* Allocate channel buffers. 
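 */

/*
 * slip_esc() is only declared above and its body is not shown here. A
 * sketch of the classic RFC 1055 framing such an encoder performs
 * (END = 0300, ESC = 0333, with in-band END/ESC replaced by the
 * ESC ESC_END / ESC ESC_ESC pairs), assuming standard SLIP behaviour;
 * sketch_esc is a hypothetical name:
 */
#ifdef SLIP_DOC_SKETCH
static int sketch_esc(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *start = d;

	*d++ = 0300;			/* leading END flushes line noise */
	while (len-- > 0) {
		switch (*s) {
		case 0300:		/* END inside the payload */
			*d++ = 0333;
			*d++ = 0334;
			break;
		case 0333:		/* ESC inside the payload */
			*d++ = 0333;
			*d++ = 0335;
			break;
		default:
			*d++ = *s;
		}
		s++;
	}
	*d++ = 0300;			/* trailing END closes the frame */
	return d - start;
}
#endif /* SLIP_DOC_SKETCH */

/*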
*/ static int sl_alloc_bufs(struct slip *sl, int mtu) { int err = -ENOBUFS; unsigned long len; char *rbuff = NULL; char *xbuff = NULL; #ifdef SL_INCLUDE_CSLIP char *cbuff = NULL; struct slcompress *slcomp = NULL; #endif /* * Allocate the SLIP frame buffers: * * rbuff Receive buffer. * xbuff Transmit buffer. * cbuff Temporary compression buffer. */ len = mtu * 2; /* * allow for arrival of larger UDP packets, even if we say not to * also fixes a bug in which SunOS sends 512-byte packets even with * an MSS of 128 */ if (len < 576 * 2) len = 576 * 2; rbuff = kmalloc(len + 4, GFP_KERNEL); if (rbuff == NULL) goto err_exit; xbuff = kmalloc(len + 4, GFP_KERNEL); if (xbuff == NULL) goto err_exit; #ifdef SL_INCLUDE_CSLIP cbuff = kmalloc(len + 4, GFP_KERNEL); if (cbuff == NULL) goto err_exit; slcomp = slhc_init(16, 16); if (IS_ERR(slcomp)) goto err_exit; #endif spin_lock_bh(&sl->lock); if (sl->tty == NULL) { spin_unlock_bh(&sl->lock); err = -ENODEV; goto err_exit; } sl->mtu = mtu; sl->buffsize = len; sl->rcount = 0; sl->xleft = 0; rbuff = xchg(&sl->rbuff, rbuff); xbuff = xchg(&sl->xbuff, xbuff); #ifdef SL_INCLUDE_CSLIP cbuff = xchg(&sl->cbuff, cbuff); slcomp = xchg(&sl->slcomp, slcomp); #endif #ifdef CONFIG_SLIP_MODE_SLIP6 sl->xdata = 0; sl->xbits = 0; #endif spin_unlock_bh(&sl->lock); err = 0; /* Cleanup */ err_exit: #ifdef SL_INCLUDE_CSLIP kfree(cbuff); slhc_free(slcomp); #endif kfree(xbuff); kfree(rbuff); return err; } /* Free a SLIP channel buffers. */ static void sl_free_bufs(struct slip *sl) { /* Free all SLIP frame buffers. */ kfree(xchg(&sl->rbuff, NULL)); kfree(xchg(&sl->xbuff, NULL)); #ifdef SL_INCLUDE_CSLIP kfree(xchg(&sl->cbuff, NULL)); slhc_free(xchg(&sl->slcomp, NULL)); #endif } /* Reallocate slip channel buffers. */ static int sl_realloc_bufs(struct slip *sl, int mtu) { int err = 0; struct net_device *dev = sl->dev; unsigned char *xbuff, *rbuff; #ifdef SL_INCLUDE_CSLIP unsigned char *cbuff; #endif int len = mtu * 2; /* * allow for arrival of larger UDP packets, even if we say not to * also fixes a bug in which SunOS sends 512-byte packets even with * an MSS of 128 */ if (len < 576 * 2) len = 576 * 2; xbuff = kmalloc(len + 4, GFP_ATOMIC); rbuff = kmalloc(len + 4, GFP_ATOMIC); #ifdef SL_INCLUDE_CSLIP cbuff = kmalloc(len + 4, GFP_ATOMIC); #endif #ifdef SL_INCLUDE_CSLIP if (xbuff == NULL || rbuff == NULL || cbuff == NULL) { #else if (xbuff == NULL || rbuff == NULL) { #endif if (mtu > sl->mtu) { printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n", dev->name); err = -ENOBUFS; } goto done; } spin_lock_bh(&sl->lock); err = -ENODEV; if (sl->tty == NULL) goto done_on_bh; xbuff = xchg(&sl->xbuff, xbuff); rbuff = xchg(&sl->rbuff, rbuff); #ifdef SL_INCLUDE_CSLIP cbuff = xchg(&sl->cbuff, cbuff); #endif if (sl->xleft) { if (sl->xleft <= len) { memcpy(sl->xbuff, sl->xhead, sl->xleft); } else { sl->xleft = 0; dev->stats.tx_dropped++; } } sl->xhead = sl->xbuff; if (sl->rcount) { if (sl->rcount <= len) { memcpy(sl->rbuff, rbuff, sl->rcount); } else { sl->rcount = 0; dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } sl->mtu = mtu; WRITE_ONCE(dev->mtu, mtu); sl->buffsize = len; err = 0; done_on_bh: spin_unlock_bh(&sl->lock); done: kfree(xbuff); kfree(rbuff); #ifdef SL_INCLUDE_CSLIP kfree(cbuff); #endif return err; } /* Set the "sending" flag. This must be atomic hence the set_bit. */ static inline void sl_lock(struct slip *sl) { netif_stop_queue(sl->dev); } /* Clear the "sending" flag. This must be atomic, hence the ASM. 
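 */

/*
 * The reallocation above relies on xchg() swaps taken under sl->lock:
 * fresh buffers are exchanged into the struct while the old pointers
 * come back out to be freed outside the lock. A minimal sketch of the
 * pattern, with a hypothetical helper name:
 */
#ifdef SLIP_DOC_SKETCH
static void swap_and_free(struct slip *sl, unsigned char *fresh)
{
	spin_lock_bh(&sl->lock);
	fresh = xchg(&sl->rbuff, fresh);	/* fresh now holds the old buffer */
	spin_unlock_bh(&sl->lock);
	kfree(fresh);
}
#endif /* SLIP_DOC_SKETCH */

/*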
*/ static inline void sl_unlock(struct slip *sl) { netif_wake_queue(sl->dev); } /* Send one completely decapsulated IP datagram to the IP layer. */ static void sl_bump(struct slip *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; int count; count = sl->rcount; #ifdef SL_INCLUDE_CSLIP if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) { unsigned char c = sl->rbuff[0]; if (c & SL_TYPE_COMPRESSED_TCP) { /* ignore compressed packets when CSLIP is off */ if (!(sl->mode & SL_MODE_CSLIP)) { printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name); return; } /* make sure we've reserved enough space for uncompress to use */ if (count + 80 > sl->buffsize) { dev->stats.rx_over_errors++; return; } count = slhc_uncompress(sl->slcomp, sl->rbuff, count); if (count <= 0) return; } else if (c >= SL_TYPE_UNCOMPRESSED_TCP) { if (!(sl->mode & SL_MODE_CSLIP)) { /* turn on header compression */ sl->mode |= SL_MODE_CSLIP; sl->mode &= ~SL_MODE_ADAPTIVE; printk(KERN_INFO "%s: header compression turned on\n", dev->name); } sl->rbuff[0] &= 0x4f; if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0) return; } } #endif /* SL_INCLUDE_CSLIP */ dev->stats.rx_bytes += count; skb = dev_alloc_skb(count); if (skb == NULL) { printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name); dev->stats.rx_dropped++; return; } skb->dev = dev; skb_put_data(skb, sl->rbuff, count); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IP); netif_rx(skb); dev->stats.rx_packets++; } /* Encapsulate one IP datagram and stuff into a TTY queue. */ static void sl_encaps(struct slip *sl, unsigned char *icp, int len) { unsigned char *p; int actual, count; if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */ printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name); sl->dev->stats.tx_dropped++; sl_unlock(sl); return; } p = icp; #ifdef SL_INCLUDE_CSLIP if (sl->mode & SL_MODE_CSLIP) len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1); #endif #ifdef CONFIG_SLIP_MODE_SLIP6 if (sl->mode & SL_MODE_SLIP6) count = slip_esc6(p, sl->xbuff, len); else #endif count = slip_esc(p, sl->xbuff, len); /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. */ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); #ifdef SL_CHECK_TRANSMIT netif_trans_update(sl->dev); #endif sl->xleft = count - actual; sl->xhead = sl->xbuff + actual; #ifdef CONFIG_SLIP_SMART /* VSV */ clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */ #endif } /* Write out any remaining transmit buffer. Scheduled when tty is writable */ static void slip_transmit(struct work_struct *work) { struct slip *sl = container_of(work, struct slip, tx_work); int actual; spin_lock_bh(&sl->lock); /* First make sure we're connected. 
*/ if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) { spin_unlock_bh(&sl->lock); return; } if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); sl_unlock(sl); return; } actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; spin_unlock_bh(&sl->lock); } /* * Called by the driver when there's room for more data. * Schedule the transmit. */ static void slip_write_wakeup(struct tty_struct *tty) { struct slip *sl; rcu_read_lock(); sl = rcu_dereference(tty->disc_data); if (sl) schedule_work(&sl->tx_work); rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct slip *sl = netdev_priv(dev); spin_lock(&sl->lock); if (netif_queue_stopped(dev)) { if (!netif_running(dev) || !sl->tty) goto out; /* May be we must check transmitter timeout here ? * 14 Oct 1994 Dmitry Gorodchanin. */ #ifdef SL_CHECK_TRANSMIT if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) { /* 20 sec timeout not reached */ goto out; } printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, (tty_chars_in_buffer(sl->tty) || sl->xleft) ? "bad line quality" : "driver error"); sl->xleft = 0; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); sl_unlock(sl); #endif } out: spin_unlock(&sl->lock); } /* Encapsulate an IP datagram and kick it into a TTY queue. */ static netdev_tx_t sl_xmit(struct sk_buff *skb, struct net_device *dev) { struct slip *sl = netdev_priv(dev); spin_lock(&sl->lock); if (!netif_running(dev)) { spin_unlock(&sl->lock); printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name); dev_kfree_skb(skb); return NETDEV_TX_OK; } if (sl->tty == NULL) { spin_unlock(&sl->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } sl_lock(sl); dev->stats.tx_bytes += skb->len; sl_encaps(sl, skb->data, skb->len); spin_unlock(&sl->lock); dev_kfree_skb(skb); return NETDEV_TX_OK; } /****************************************** * Routines looking at netdevice side. ******************************************/ /* Netdevice UP -> DOWN routine */ static int sl_close(struct net_device *dev) { struct slip *sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) /* TTY discipline is running. 
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; spin_unlock_bh(&sl->lock); return 0; } /* Netdevice DOWN -> UP routine */ static int sl_open(struct net_device *dev) { struct slip *sl = netdev_priv(dev); if (sl->tty == NULL) return -ENODEV; sl->flags &= (1 << SLF_INUSE); netif_start_queue(dev); return 0; } /* Netdevice change MTU request */ static int sl_change_mtu(struct net_device *dev, int new_mtu) { struct slip *sl = netdev_priv(dev); return sl_realloc_bufs(sl, new_mtu); } /* Netdevice get statistics request */ static void sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct net_device_stats *devstats = &dev->stats; #ifdef SL_INCLUDE_CSLIP struct slip *sl = netdev_priv(dev); struct slcompress *comp = sl->slcomp; #endif stats->rx_packets = devstats->rx_packets; stats->tx_packets = devstats->tx_packets; stats->rx_bytes = devstats->rx_bytes; stats->tx_bytes = devstats->tx_bytes; stats->rx_dropped = devstats->rx_dropped; stats->tx_dropped = devstats->tx_dropped; stats->tx_errors = devstats->tx_errors; stats->rx_errors = devstats->rx_errors; stats->rx_over_errors = devstats->rx_over_errors; #ifdef SL_INCLUDE_CSLIP if (comp) { /* Generic compressed statistics */ stats->rx_compressed = comp->sls_i_compressed; stats->tx_compressed = comp->sls_o_compressed; /* Do we really still need this? */ stats->rx_fifo_errors += comp->sls_i_compressed; stats->rx_dropped += comp->sls_i_tossed; stats->tx_fifo_errors += comp->sls_o_compressed; stats->collisions += comp->sls_o_misses; } #endif } /* Netdevice register callback */ static int sl_init(struct net_device *dev) { struct slip *sl = netdev_priv(dev); /* * Finish setting up the DEVICE info. */ dev->mtu = sl->mtu; dev->type = ARPHRD_SLIP + sl->mode; #ifdef SL_CHECK_TRANSMIT dev->watchdog_timeo = 20*HZ; #endif return 0; } static void sl_uninit(struct net_device *dev) { struct slip *sl = netdev_priv(dev); sl_free_bufs(sl); } /* Hook the destructor so we can free slip devices at the right point in time */ static void sl_free_netdev(struct net_device *dev) { int i = dev->base_addr; slip_devs[i] = NULL; } static const struct net_device_ops sl_netdev_ops = { .ndo_init = sl_init, .ndo_uninit = sl_uninit, .ndo_open = sl_open, .ndo_stop = sl_close, .ndo_start_xmit = sl_xmit, .ndo_get_stats64 = sl_get_stats64, .ndo_change_mtu = sl_change_mtu, .ndo_tx_timeout = sl_tx_timeout, #ifdef CONFIG_SLIP_SMART .ndo_siocdevprivate = sl_siocdevprivate, #endif }; static void sl_setup(struct net_device *dev) { dev->netdev_ops = &sl_netdev_ops; dev->needs_free_netdev = true; dev->priv_destructor = sl_free_netdev; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 10; /* MTU range: 68 - 65534 */ dev->min_mtu = 68; dev->max_mtu = 65534; /* New-style flags. */ dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST; } /****************************************** Routines looking at TTY side. ******************************************/ /* * Handle the 'receiver data ready' interrupt. * This function is called by the 'tty_io' module in the kernel when * a block of SLIP data has been received, which can now be decapsulated * and sent on to some IP layer for further processing.
This will not * be re-entered while running but other ldisc functions may be called * in parallel. */ static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct slip *sl = tty->disc_data; if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) return; /* Read the characters out of the buffer */ while (count--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; cp++; continue; } #ifdef CONFIG_SLIP_MODE_SLIP6 if (sl->mode & SL_MODE_SLIP6) slip_unesc6(sl, *cp++); else #endif slip_unesc(sl, *cp++); } } /************************************ * slip_open helper routines. ************************************/ /* Collect hung-up channels */ static void sl_sync(void) { int i; struct net_device *dev; struct slip *sl; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (dev == NULL) break; sl = netdev_priv(dev); if (sl->tty || sl->leased) continue; if (dev->flags & IFF_UP) dev_close(dev); } } /* Find a free SLIP channel, and link in this `tty' line. */ static struct slip *sl_alloc(void) { int i; char name[IFNAMSIZ]; struct net_device *dev = NULL; struct slip *sl; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (dev == NULL) break; } /* Sorry, too many, all slots in use */ if (i >= slip_maxdev) return NULL; sprintf(name, "sl%d", i); dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup); if (!dev) return NULL; dev->base_addr = i; sl = netdev_priv(dev); /* Initialize channel control data */ sl->magic = SLIP_MAGIC; sl->dev = dev; spin_lock_init(&sl->lock); INIT_WORK(&sl->tx_work, slip_transmit); sl->mode = SL_MODE_DEFAULT; #ifdef CONFIG_SLIP_SMART /* initialize timer_list struct */ timer_setup(&sl->keepalive_timer, sl_keepalive, 0); timer_setup(&sl->outfill_timer, sl_outfill, 0); #endif slip_devs[i] = dev; return sl; } /* * Open the high-level part of the SLIP channel. * This function is called by the TTY module when the * SLIP line discipline is called for. Because we are * sure the tty line exists, we only have to link it to * a free SLIP channel... * * Called in process context serialized from other ldisc calls. */ static int slip_open(struct tty_struct *tty) { struct slip *sl; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EOPNOTSUPP; /* RTnetlink lock is misused here to serialize concurrent opens of slip channels. There are better ways, but it is the simplest one. */ rtnl_lock(); /* Collect hung-up channels. */ sl_sync(); sl = tty->disc_data; err = -EEXIST; /* First make sure we're not already connected. */ if (sl && sl->magic == SLIP_MAGIC) goto err_exit; /* OK. Find a free SLIP channel to use. */ err = -ENFILE; sl = sl_alloc(); if (sl == NULL) goto err_exit; sl->tty = tty; tty->disc_data = sl; sl->pid = current->pid; if (!test_bit(SLF_INUSE, &sl->flags)) { /* Perform the low-level SLIP initialization. */ err = sl_alloc_bufs(sl, SL_MTU); if (err) goto err_free_chan; set_bit(SLF_INUSE, &sl->flags); err = register_netdevice(sl->dev); if (err) goto err_free_bufs; } #ifdef CONFIG_SLIP_SMART if (sl->keepalive) { sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ; add_timer(&sl->keepalive_timer); } if (sl->outfill) { sl->outfill_timer.expires = jiffies + sl->outfill * HZ; add_timer(&sl->outfill_timer); } #endif /* Done. We have linked the TTY line to a channel.
*/ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ /* TTY layer expects 0 on success */ return 0; err_free_bufs: sl_free_bufs(sl); err_free_chan: sl->tty = NULL; tty->disc_data = NULL; clear_bit(SLF_INUSE, &sl->flags); sl_free_netdev(sl->dev); /* do not call free_netdev before rtnl_unlock */ rtnl_unlock(); free_netdev(sl->dev); return err; err_exit: rtnl_unlock(); /* Count references from TTY module */ return err; } /* * Close down a SLIP channel. * This means flushing out any pending queues, and then returning. This * call is serialized against other ldisc functions. * * We also use this method for a hangup event */ static void slip_close(struct tty_struct *tty) { struct slip *sl = tty->disc_data; /* First make sure we're connected. */ if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty) return; spin_lock_bh(&sl->lock); rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ #ifdef CONFIG_SLIP_SMART del_timer_sync(&sl->keepalive_timer); del_timer_sync(&sl->outfill_timer); #endif /* Flush network side */ unregister_netdev(sl->dev); /* This will complete via sl_free_netdev */ } static void slip_hangup(struct tty_struct *tty) { slip_close(tty); } /************************************************************************ * STANDARD SLIP ENCAPSULATION * ************************************************************************/ static int slip_esc(unsigned char *s, unsigned char *d, int len) { unsigned char *ptr = d; unsigned char c; /* * Send an initial END character to flush out any * data that may have accumulated in the receiver * due to line noise. */ *ptr++ = END; /* * For each byte in the packet, send the appropriate * character sequence, according to the SLIP protocol. */ while (len-- > 0) { switch (c = *s++) { case END: *ptr++ = ESC; *ptr++ = ESC_END; break; case ESC: *ptr++ = ESC; *ptr++ = ESC_ESC; break; default: *ptr++ = c; break; } } *ptr++ = END; return ptr - d; } static void slip_unesc(struct slip *sl, unsigned char s) { switch (s) { case END: #ifdef CONFIG_SLIP_SMART /* drop keeptest bit = VSV */ if (test_bit(SLF_KEEPTEST, &sl->flags)) clear_bit(SLF_KEEPTEST, &sl->flags); #endif if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) sl_bump(sl); clear_bit(SLF_ESCAPE, &sl->flags); sl->rcount = 0; return; case ESC: set_bit(SLF_ESCAPE, &sl->flags); return; case ESC_ESC: if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) s = ESC; break; case ESC_END: if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) s = END; break; } if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < sl->buffsize) { sl->rbuff[sl->rcount++] = s; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } #ifdef CONFIG_SLIP_MODE_SLIP6 /************************************************************************ * 6 BIT SLIP ENCAPSULATION * ************************************************************************/ static int slip_esc6(unsigned char *s, unsigned char *d, int len) { unsigned char *ptr = d; unsigned char c; int i; unsigned short v = 0; short bits = 0; /* * Send an initial END character to flush out any * data that may have accumulated in the receiver * due to line noise.
*/ *ptr++ = 0x70; /* * Encode the packet into printable ascii characters */ for (i = 0; i < len; ++i) { v = (v << 8) | s[i]; bits += 8; while (bits >= 6) { bits -= 6; c = 0x30 + ((v >> bits) & 0x3F); *ptr++ = c; } } if (bits) { c = 0x30 + ((v << (6 - bits)) & 0x3F); *ptr++ = c; } *ptr++ = 0x70; return ptr - d; } static void slip_unesc6(struct slip *sl, unsigned char s) { unsigned char c; if (s == 0x70) { #ifdef CONFIG_SLIP_SMART /* drop keeptest bit = VSV */ if (test_bit(SLF_KEEPTEST, &sl->flags)) clear_bit(SLF_KEEPTEST, &sl->flags); #endif if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) sl_bump(sl); sl->rcount = 0; sl->xbits = 0; sl->xdata = 0; } else if (s >= 0x30 && s < 0x70) { sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F); sl->xbits += 6; if (sl->xbits >= 8) { sl->xbits -= 8; c = (unsigned char)(sl->xdata >> sl->xbits); if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < sl->buffsize) { sl->rbuff[sl->rcount++] = c; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } } } #endif /* CONFIG_SLIP_MODE_SLIP6 */ /* Perform I/O control on an active SLIP channel. */ static int slip_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slip *sl = tty->disc_data; unsigned int tmp; int __user *p = (int __user *)arg; /* First make sure we're connected. */ if (!sl || sl->magic != SLIP_MAGIC) return -EINVAL; switch (cmd) { case SIOCGIFNAME: tmp = strlen(sl->dev->name) + 1; if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) return -EFAULT; return 0; case SIOCGIFENCAP: if (put_user(sl->mode, p)) return -EFAULT; return 0; case SIOCSIFENCAP: if (get_user(tmp, p)) return -EFAULT; #ifndef SL_INCLUDE_CSLIP if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE)) return -EINVAL; #else if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) == (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) /* return -EINVAL; */ tmp &= ~SL_MODE_ADAPTIVE; #endif #ifndef CONFIG_SLIP_MODE_SLIP6 if (tmp & SL_MODE_SLIP6) return -EINVAL; #endif sl->mode = tmp; sl->dev->type = ARPHRD_SLIP + sl->mode; return 0; case SIOCSIFHWADDR: return -EINVAL; #ifdef CONFIG_SLIP_SMART /* VSV changes start here */ case SIOCSKEEPALIVE: if (get_user(tmp, p)) return -EFAULT; if (tmp > 255) /* max for unchar */ return -EINVAL; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } sl->keepalive = (u8)tmp; if (sl->keepalive != 0) { mod_timer(&sl->keepalive_timer, jiffies + sl->keepalive * HZ); set_bit(SLF_KEEPTEST, &sl->flags); } else del_timer(&sl->keepalive_timer); spin_unlock_bh(&sl->lock); return 0; case SIOCGKEEPALIVE: if (put_user(sl->keepalive, p)) return -EFAULT; return 0; case SIOCSOUTFILL: if (get_user(tmp, p)) return -EFAULT; if (tmp > 255) /* max for unchar */ return -EINVAL; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } sl->outfill = (u8)tmp; if (sl->outfill != 0) { mod_timer(&sl->outfill_timer, jiffies + sl->outfill * HZ); set_bit(SLF_OUTWAIT, &sl->flags); } else del_timer(&sl->outfill_timer); spin_unlock_bh(&sl->lock); return 0; case SIOCGOUTFILL: if (put_user(sl->outfill, p)) return -EFAULT; return 0; /* VSV changes end */ #endif default: return tty_mode_ioctl(tty, cmd, arg); } } /* VSV changes start here */ #ifdef CONFIG_SLIP_SMART /* function sl_siocdevprivate called from net/core/dev.c to allow get/set outfill/keepalive parameter by ifconfig */ static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct slip *sl = netdev_priv(dev); unsigned long *p = (unsigned long 
*)&rq->ifr_ifru; if (sl == NULL) /* Allocation failed ?? */ return -ENODEV; if (in_compat_syscall()) return -EOPNOTSUPP; spin_lock_bh(&sl->lock); if (!sl->tty) { spin_unlock_bh(&sl->lock); return -ENODEV; } switch (cmd) { case SIOCSKEEPALIVE: /* max for unchar */ if ((unsigned)*p > 255) { spin_unlock_bh(&sl->lock); return -EINVAL; } sl->keepalive = (u8)*p; if (sl->keepalive != 0) { sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ; mod_timer(&sl->keepalive_timer, jiffies + sl->keepalive * HZ); set_bit(SLF_KEEPTEST, &sl->flags); } else del_timer(&sl->keepalive_timer); break; case SIOCGKEEPALIVE: *p = sl->keepalive; break; case SIOCSOUTFILL: if ((unsigned)*p > 255) { /* max for unchar */ spin_unlock_bh(&sl->lock); return -EINVAL; } sl->outfill = (u8)*p; if (sl->outfill != 0) { mod_timer(&sl->outfill_timer, jiffies + sl->outfill * HZ); set_bit(SLF_OUTWAIT, &sl->flags); } else del_timer(&sl->outfill_timer); break; case SIOCGOUTFILL: *p = sl->outfill; break; case SIOCSLEASE: /* Resolve a race condition when ioctl'ing a device that has been hung up and reopened by another process. */ if (sl->tty != current->signal->tty && sl->pid != current->pid) { spin_unlock_bh(&sl->lock); return -EPERM; } sl->leased = 0; if (*p) sl->leased = 1; break; case SIOCGLEASE: *p = sl->leased; } spin_unlock_bh(&sl->lock); return 0; } #endif /* VSV changes end */ static struct tty_ldisc_ops sl_ldisc = { .owner = THIS_MODULE, .num = N_SLIP, .name = "slip", .open = slip_open, .close = slip_close, .hangup = slip_hangup, .ioctl = slip_ioctl, .receive_buf = slip_receive_buf, .write_wakeup = slip_write_wakeup, }; static int __init slip_init(void) { int status; if (slip_maxdev < 4) slip_maxdev = 4; /* Sanity */ printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)" #ifdef CONFIG_SLIP_MODE_SLIP6 " (6 bit encapsulation enabled)" #endif ".\n", SLIP_VERSION, slip_maxdev); #if defined(SL_INCLUDE_CSLIP) printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n"); #endif #ifdef CONFIG_SLIP_SMART printk(KERN_INFO "SLIP linefill/keepalive option.\n"); #endif slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *), GFP_KERNEL); if (!slip_devs) return -ENOMEM; /* Fill in our line protocol discipline, and register it */ status = tty_register_ldisc(&sl_ldisc); if (status != 0) { printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); kfree(slip_devs); } return status; } static void __exit slip_exit(void) { int i; struct net_device *dev; struct slip *sl; unsigned long timeout = jiffies + HZ; int busy = 0; if (slip_devs == NULL) return; /* First of all: check for active disciplines and hang them up. */ do { if (busy) msleep_interruptible(100); busy = 0; for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (!dev) continue; sl = netdev_priv(dev); spin_lock_bh(&sl->lock); if (sl->tty) { busy++; tty_hangup(sl->tty); } spin_unlock_bh(&sl->lock); } } while (busy && time_before(jiffies, timeout)); /* FIXME: hangup is async so we should wait when doing this second phase */ for (i = 0; i < slip_maxdev; i++) { dev = slip_devs[i]; if (!dev) continue; slip_devs[i] = NULL; sl = netdev_priv(dev); if (sl->tty) { printk(KERN_ERR "%s: tty discipline still running\n", dev->name); } unregister_netdev(dev); } kfree(slip_devs); slip_devs = NULL; tty_unregister_ldisc(&sl_ldisc); } module_init(slip_init); module_exit(slip_exit); #ifdef CONFIG_SLIP_SMART /* * This is the start of the code for multislip-style line checking, * added by Stanislav Voronyi.
All such changes above are marked VSV */ static void sl_outfill(struct timer_list *t) { struct slip *sl = from_timer(sl, t, outfill_timer); spin_lock(&sl->lock); if (sl->tty == NULL) goto out; if (sl->outfill) { if (test_bit(SLF_OUTWAIT, &sl->flags)) { /* no packets were transmitted, do outfill */ #ifdef CONFIG_SLIP_MODE_SLIP6 unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END; #else unsigned char s = END; #endif /* put END into the tty queue. Is this right? */ if (!netif_queue_stopped(sl->dev)) { /* if the device is busy, skip outfill */ sl->tty->ops->write(sl->tty, &s, 1); } } else set_bit(SLF_OUTWAIT, &sl->flags); mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ); } out: spin_unlock(&sl->lock); } static void sl_keepalive(struct timer_list *t) { struct slip *sl = from_timer(sl, t, keepalive_timer); spin_lock(&sl->lock); if (sl->tty == NULL) goto out; if (sl->keepalive) { if (test_bit(SLF_KEEPTEST, &sl->flags)) { /* keepalive test bit still set: nothing was received, we must hang up */ if (sl->outfill) /* outfill timer must be deleted too */ (void)del_timer(&sl->outfill_timer); printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name); /* this must hang up the tty & close slip */ tty_hangup(sl->tty); /* nothing else should be needed */ goto out; } else set_bit(SLF_KEEPTEST, &sl->flags); mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ); } out: spin_unlock(&sl->lock); } #endif MODULE_DESCRIPTION("SLIP (serial line) protocol module"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_SLIP);
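/*
 * Example (not part of this driver): the END/ESC byte-stuffing done by
 * slip_esc()/slip_unesc() above is plain RFC 1055 framing and can be
 * exercised in ordinary userspace C. A minimal sketch follows, assuming
 * only the standard protocol constants; slip_frame() is a hypothetical
 * helper name, not a kernel symbol.
 */
#include <stddef.h>

#define END     0300		/* 0xC0: frame delimiter */
#define ESC     0333		/* 0xDB: escape introducer */
#define ESC_END 0334		/* 0xDC: escaped END */
#define ESC_ESC 0335		/* 0xDD: escaped ESC */

/* Encode len bytes of s into d; d needs room for 2 * len + 2 bytes in the
 * worst case (every byte escaped). Returns the encoded length. */
static size_t slip_frame(const unsigned char *s, size_t len, unsigned char *d)
{
	unsigned char *p = d;

	*p++ = END;		/* leading END flushes accumulated line noise */
	while (len--) {
		switch (*s) {
		case END:
			*p++ = ESC;
			*p++ = ESC_END;
			break;
		case ESC:
			*p++ = ESC;
			*p++ = ESC_ESC;
			break;
		default:
			*p++ = *s;
		}
		s++;
	}
	*p++ = END;		/* trailing END terminates the frame */
	return p - d;
}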
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Stream Parser * * Copyright (c) 2016 Tom Herbert <tom@herbertland.com> */ #ifndef __NET_STRPARSER_H_ #define __NET_STRPARSER_H_ #include <linux/skbuff.h> #include <net/sock.h> #define STRP_STATS_ADD(stat, count) ((stat) += (count)) #define STRP_STATS_INCR(stat) ((stat)++) struct strp_stats { unsigned long long msgs; unsigned long long bytes; unsigned int mem_fail; unsigned int need_more_hdr; unsigned int msg_too_big; unsigned int msg_timeouts; unsigned int bad_hdr_len; }; struct strp_aggr_stats { unsigned long long msgs; unsigned long long bytes; unsigned int mem_fail; unsigned int need_more_hdr; unsigned int msg_too_big; unsigned int msg_timeouts; unsigned int bad_hdr_len; unsigned int aborts; unsigned int interrupted; unsigned int unrecov_intr; }; struct strparser; /* Callbacks are called with lock held for the attached socket */ struct strp_callbacks { int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); int (*read_sock_done)(struct strparser *strp, int err); void (*abort_parser)(struct strparser *strp, int err); void (*lock)(struct strparser *strp); void (*unlock)(struct strparser *strp); }; struct strp_msg { int full_len; int offset; }; struct _strp_msg { /* Internal cb structure. struct strp_msg must be first for passing * to upper layer. */ struct strp_msg strp; int accum_len; }; struct sk_skb_cb { #define SK_SKB_CB_PRIV_LEN 20 unsigned char data[SK_SKB_CB_PRIV_LEN]; /* align strp on cache line boundary within skb->cb[] */ unsigned char pad[4]; struct _strp_msg strp; /* strp users' data follows */ struct tls_msg { u8 control; } tls; /* temp_reg is a temporary register used for bpf_convert_data_end_access * when dst_reg == src_reg. */ u64 temp_reg; }; static inline struct strp_msg *strp_msg(struct sk_buff *skb) { return (struct strp_msg *)((void *)skb->cb + offsetof(struct sk_skb_cb, strp)); } /* Structure for an attached lower socket */ struct strparser { struct sock *sk; u32 stopped : 1; u32 paused : 1; u32 aborted : 1; u32 interrupted : 1; u32 unrecov_intr : 1; struct sk_buff **skb_nextp; struct sk_buff *skb_head; unsigned int need_bytes; struct delayed_work msg_timer_work; struct work_struct work; struct strp_stats stats; struct strp_callbacks cb; }; /* Must be called with lock held for attached socket */ static inline void strp_pause(struct strparser *strp) { strp->paused = 1; } /* May be called without holding lock for attached socket */ void strp_unpause(struct strparser *strp); /* Must be called with process lock held (lock_sock) */ void __strp_unpause(struct strparser *strp); static inline void save_strp_stats(struct strparser *strp, struct strp_aggr_stats *agg_stats) { /* Save psock statistics in the mux when psock is being unattached.
*/ #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \ strp->stats._stat) SAVE_PSOCK_STATS(msgs); SAVE_PSOCK_STATS(bytes); SAVE_PSOCK_STATS(mem_fail); SAVE_PSOCK_STATS(need_more_hdr); SAVE_PSOCK_STATS(msg_too_big); SAVE_PSOCK_STATS(msg_timeouts); SAVE_PSOCK_STATS(bad_hdr_len); #undef SAVE_PSOCK_STATS if (strp->aborted) agg_stats->aborts++; if (strp->interrupted) agg_stats->interrupted++; if (strp->unrecov_intr) agg_stats->unrecov_intr++; } static inline void aggregate_strp_stats(struct strp_aggr_stats *stats, struct strp_aggr_stats *agg_stats) { #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) SAVE_PSOCK_STATS(msgs); SAVE_PSOCK_STATS(bytes); SAVE_PSOCK_STATS(mem_fail); SAVE_PSOCK_STATS(need_more_hdr); SAVE_PSOCK_STATS(msg_too_big); SAVE_PSOCK_STATS(msg_timeouts); SAVE_PSOCK_STATS(bad_hdr_len); SAVE_PSOCK_STATS(aborts); SAVE_PSOCK_STATS(interrupted); SAVE_PSOCK_STATS(unrecov_intr); #undef SAVE_PSOCK_STATS } void strp_done(struct strparser *strp); void strp_stop(struct strparser *strp); void strp_check_rcv(struct strparser *strp); int strp_init(struct strparser *strp, struct sock *sk, const struct strp_callbacks *cb); void strp_data_ready(struct strparser *strp); int strp_process(struct strparser *strp, struct sk_buff *orig_skb, unsigned int orig_offset, size_t orig_len, size_t max_msg_size, long timeo); #endif /* __NET_STRPARSER_H_ */
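/*
 * Usage sketch (hypothetical, not from this header): a parse_msg callback
 * for a protocol whose messages carry a 2-byte big-endian payload-length
 * header. Per the strparser contract, returning 0 asks for more data, a
 * negative value aborts parsing, and a positive value is the full message
 * length. demo_parse_msg() is an illustrative name only.
 */
#include <linux/skbuff.h>
#include <net/strparser.h>

static int demo_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *msg = strp_msg(skb);
	__be16 hdr;

	/* Wait until the whole 2-byte header has accumulated. */
	if (skb->len < msg->offset + sizeof(hdr))
		return 0;

	if (skb_copy_bits(skb, msg->offset, &hdr, sizeof(hdr)))
		return -EFAULT;

	/* Full message length = header + payload. */
	return sizeof(hdr) + be16_to_cpu(hdr);
}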
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Credentials management - see Documentation/security/credentials.rst * * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #ifndef _LINUX_CRED_H #define _LINUX_CRED_H #include <linux/capability.h> #include <linux/init.h> #include <linux/key.h> #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/uidgid.h> #include <linux/sched.h> #include <linux/sched/user.h> struct cred; struct inode; /* * COW Supplementary groups list */ struct group_info { refcount_t usage; int ngroups; kgid_t gid[]; } __randomize_layout; /** * get_group_info - Get a reference to a group info structure * @group_info: The group info to reference * * This gets a reference to a set of supplementary groups. * * If the caller is accessing a task's credentials, they must hold the RCU read * lock when reading.
*/ static inline struct group_info *get_group_info(struct group_info *gi) { refcount_inc(&gi->usage); return gi; } /** * put_group_info - Release a reference to a group info structure * @group_info: The group info to release */ #define put_group_info(group_info) \ do { \ if (refcount_dec_and_test(&(group_info)->usage)) \ groups_free(group_info); \ } while (0) #ifdef CONFIG_MULTIUSER extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); extern int in_group_p(kgid_t); extern int in_egroup_p(kgid_t); extern int groups_search(const struct group_info *, kgid_t); extern int set_current_groups(struct group_info *); extern void set_groups(struct cred *, struct group_info *); extern bool may_setgroups(void); extern void groups_sort(struct group_info *); #else static inline void groups_free(struct group_info *group_info) { } static inline int in_group_p(kgid_t grp) { return 1; } static inline int in_egroup_p(kgid_t grp) { return 1; } static inline int groups_search(const struct group_info *group_info, kgid_t grp) { return 1; } #endif /* * The security context of a task * * The parts of the context break down into two categories: * * (1) The objective context of a task. These parts are used when some other * task is attempting to affect this one. * * (2) The subjective context. These details are used when the task is acting * upon another object, be that a file, a task, a key or whatever. * * Note that some members of this structure belong to both categories - the * LSM security pointer for instance. * * A task has two security pointers. task->real_cred points to the objective * context that defines that task's actual details. The objective part of this * context is used whenever that task is acted upon. * * task->cred points to the subjective context that defines the details of how * that task is going to act upon another object. This may be overridden * temporarily to point to another security context, but normally points to the * same context as task->real_cred. */ struct cred { atomic_long_t usage; kuid_t uid; /* real UID of the task */ kgid_t gid; /* real GID of the task */ kuid_t suid; /* saved UID of the task */ kgid_t sgid; /* saved GID of the task */ kuid_t euid; /* effective UID of the task */ kgid_t egid; /* effective GID of the task */ kuid_t fsuid; /* UID for VFS ops */ kgid_t fsgid; /* GID for VFS ops */ unsigned securebits; /* SUID-less security management */ kernel_cap_t cap_inheritable; /* caps our children can inherit */ kernel_cap_t cap_permitted; /* caps we're permitted */ kernel_cap_t cap_effective; /* caps we can actually use */ kernel_cap_t cap_bset; /* capability bounding set */ kernel_cap_t cap_ambient; /* Ambient capability set */ #ifdef CONFIG_KEYS unsigned char jit_keyring; /* default keyring to attach requested * keys to */ struct key *session_keyring; /* keyring inherited over fork */ struct key *process_keyring; /* keyring private to this process */ struct key *thread_keyring; /* keyring private to this thread */ struct key *request_key_auth; /* assumed request_key authority */ #endif #ifdef CONFIG_SECURITY void *security; /* LSM security */ #endif struct user_struct *user; /* real user ID subscription */ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */ struct ucounts *ucounts; struct group_info *group_info; /* supplementary groups for euid/fsgid */ /* RCU deletion */ union { int non_rcu; /* Can we skip RCU deletion? 
*/ struct rcu_head rcu; /* RCU deletion hook */ }; } __randomize_layout; extern void __put_cred(struct cred *); extern void exit_creds(struct task_struct *); extern int copy_creds(struct task_struct *, unsigned long); extern const struct cred *get_task_cred(struct task_struct *); extern struct cred *cred_alloc_blank(void); extern struct cred *prepare_creds(void); extern struct cred *prepare_exec_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); extern const struct cred *override_creds(const struct cred *); extern void revert_creds(const struct cred *); extern struct cred *prepare_kernel_cred(struct task_struct *); extern int set_security_override(struct cred *, u32); extern int set_security_override_from_ctx(struct cred *, const char *); extern int set_create_files_as(struct cred *, struct inode *); extern int cred_fscmp(const struct cred *, const struct cred *); extern void __init cred_init(void); extern int set_cred_ucounts(struct cred *); static inline bool cap_ambient_invariant_ok(const struct cred *cred) { return cap_issubset(cred->cap_ambient, cap_intersect(cred->cap_permitted, cred->cap_inheritable)); } /* * Override creds without bumping reference count. Caller must ensure * the reference remains valid or must have taken a reference. Almost always * not the interface you want. Use override_creds()/revert_creds() instead. */ static inline const struct cred *override_creds_light(const struct cred *override_cred) { const struct cred *old = current->cred; rcu_assign_pointer(current->cred, override_cred); return old; } static inline void revert_creds_light(const struct cred *revert_cred) { rcu_assign_pointer(current->cred, revert_cred); } /** * get_new_cred_many - Get references on a new set of credentials * @cred: The new credentials to reference * @nr: Number of references to acquire * * Get references on the specified set of new credentials. The caller must * release all acquired references. */ static inline struct cred *get_new_cred_many(struct cred *cred, int nr) { atomic_long_add(nr, &cred->usage); return cred; } /** * get_new_cred - Get a reference on a new set of credentials * @cred: The new credentials to reference * * Get a reference on the specified set of new credentials. The caller must * release the reference. */ static inline struct cred *get_new_cred(struct cred *cred) { return get_new_cred_many(cred, 1); } /** * get_cred_many - Get references on a set of credentials * @cred: The credentials to reference * @nr: Number of references to acquire * * Get references on the specified set of credentials. The caller must release * all acquired references. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials. Although the * pointer is const, this will temporarily discard the const and increment the * usage count. The purpose of this is to attempt to catch at compile time the * accidental alteration of a set of credentials that should be considered * immutable. */ static inline const struct cred *get_cred_many(const struct cred *cred, int nr) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return cred; nonconst_cred->non_rcu = 0; return get_new_cred_many(nonconst_cred, nr); } /* * get_cred - Get a reference on a set of credentials * @cred: The credentials to reference * * Get a reference on the specified set of credentials. The caller must * release the reference. If %NULL is passed, it is returned with no action. * * This is used to deal with a committed set of credentials.
*/ static inline const struct cred *get_cred(const struct cred *cred) { return get_cred_many(cred, 1); } static inline const struct cred *get_cred_rcu(const struct cred *cred) { struct cred *nonconst_cred = (struct cred *) cred; if (!cred) return NULL; if (!atomic_long_inc_not_zero(&nonconst_cred->usage)) return NULL; nonconst_cred->non_rcu = 0; return cred; } /** * put_cred_many - Release references to a set of credentials * @cred: The credentials to release * @nr: Number of references to release * * Release references to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. * * This takes a const pointer to a set of credentials because the credentials * on task_struct are attached by const pointers to prevent accidental * alteration of otherwise immutable credential sets. */ static inline void put_cred_many(const struct cred *_cred, int nr) { struct cred *cred = (struct cred *) _cred; if (cred) { if (atomic_long_sub_and_test(nr, &cred->usage)) __put_cred(cred); } } /* * put_cred - Release a reference to a set of credentials * @cred: The credentials to release * * Release a reference to a set of credentials, deleting them when the last ref * is released. If %NULL is passed, nothing is done. */ static inline void put_cred(const struct cred *cred) { put_cred_many(cred, 1); } /** * current_cred - Access the current task's subjective credentials * * Access the subjective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_cred() \ rcu_dereference_protected(current->cred, 1) /** * current_real_cred - Access the current task's objective credentials * * Access the objective credentials of the current task. RCU-safe, * since nobody else can modify it. */ #define current_real_cred() \ rcu_dereference_protected(current->real_cred, 1) /** * __task_cred - Access a task's objective credentials * @task: The task to query * * Access the objective credentials of a task. The caller must hold the RCU * readlock. * * The result of this function should not be passed directly to get_cred(); * rather get_task_cred() should be used instead. */ #define __task_cred(task) \ rcu_dereference((task)->real_cred) /** * get_current_cred - Get the current task's subjective credentials * * Get the subjective credentials of the current task, pinning them so that * they can't go away. Accessing the current task's credentials directly is * not permitted. */ #define get_current_cred() \ (get_cred(current_cred())) /** * get_current_user - Get the current task's user_struct * * Get the user record of the current task, pinning it so that it can't go * away. */ #define get_current_user() \ ({ \ struct user_struct *__u; \ const struct cred *__cred; \ __cred = current_cred(); \ __u = get_uid(__cred->user); \ __u; \ }) /** * get_current_groups - Get the current task's supplementary group list * * Get the supplementary group list of the current task, pinning it so that it * can't go away.
*/ #define get_current_groups() \ ({ \ struct group_info *__groups; \ const struct cred *__cred; \ __cred = current_cred(); \ __groups = get_group_info(__cred->group_info); \ __groups; \ }) #define task_cred_xxx(task, xxx) \ ({ \ __typeof__(((struct cred *)NULL)->xxx) ___val; \ rcu_read_lock(); \ ___val = __task_cred((task))->xxx; \ rcu_read_unlock(); \ ___val; \ }) #define task_uid(task) (task_cred_xxx((task), uid)) #define task_euid(task) (task_cred_xxx((task), euid)) #define task_ucounts(task) (task_cred_xxx((task), ucounts)) #define current_cred_xxx(xxx) \ ({ \ current_cred()->xxx; \ }) #define current_uid() (current_cred_xxx(uid)) #define current_gid() (current_cred_xxx(gid)) #define current_euid() (current_cred_xxx(euid)) #define current_egid() (current_cred_xxx(egid)) #define current_suid() (current_cred_xxx(suid)) #define current_sgid() (current_cred_xxx(sgid)) #define current_fsuid() (current_cred_xxx(fsuid)) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) #define current_ucounts() (current_cred_xxx(ucounts)) extern struct user_namespace init_user_ns; #ifdef CONFIG_USER_NS #define current_user_ns() (current_cred_xxx(user_ns)) #else static inline struct user_namespace *current_user_ns(void) { return &init_user_ns; } #endif #define current_uid_gid(_uid, _gid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_uid) = __cred->uid; \ *(_gid) = __cred->gid; \ } while(0) #define current_euid_egid(_euid, _egid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_euid) = __cred->euid; \ *(_egid) = __cred->egid; \ } while(0) #define current_fsuid_fsgid(_fsuid, _fsgid) \ do { \ const struct cred *__cred; \ __cred = current_cred(); \ *(_fsuid) = __cred->fsuid; \ *(_fsgid) = __cred->fsgid; \ } while(0) #endif /* _LINUX_CRED_H */
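/*
 * Usage sketch (hypothetical, not part of this header): the prepare/commit
 * pattern described above. Credentials are never modified in place; a task
 * copies its subjective creds with prepare_creds(), edits the copy, and
 * installs it with commit_creds(), which consumes the reference.
 * demo_drop_euid() is an illustrative name only.
 */
#include <linux/cred.h>

static int demo_drop_euid(kuid_t new_euid)
{
	struct cred *new = prepare_creds();	/* copy of current->cred */

	if (!new)
		return -ENOMEM;
	new->euid = new_euid;
	new->fsuid = new_euid;	/* keep VFS ops consistent with the new euid */
	return commit_creds(new);
}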
// SPDX-License-Identifier: GPL-2.0 /* * Opening fs-verity files * * Copyright 2019 Google LLC */ #include "fsverity_private.h" #include <linux/mm.h> #include <linux/slab.h> static struct kmem_cache *fsverity_info_cachep; /** * fsverity_init_merkle_tree_params() - initialize Merkle tree parameters * @params: the parameters struct to initialize * @inode: the inode for which the Merkle tree is being built * @hash_algorithm: number of hash algorithm to use * @log_blocksize: log base 2 of block size to use * @salt: pointer to salt (optional) * @salt_size: size of salt, possibly 0 * * Validate the hash algorithm and block size, then compute the tree topology * (num levels, num blocks in each level, etc.) and initialize @params. * * Return: 0 on success, -errno on failure */ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params, const struct inode *inode, unsigned int hash_algorithm, unsigned int log_blocksize, const u8 *salt, size_t salt_size) { const struct fsverity_hash_alg *hash_alg; int err; u64 blocks; u64 blocks_in_level[FS_VERITY_MAX_LEVELS]; u64 offset; int level; memset(params, 0, sizeof(*params)); hash_alg = fsverity_get_hash_alg(inode, hash_algorithm); if (IS_ERR(hash_alg)) return PTR_ERR(hash_alg); params->hash_alg = hash_alg; params->digest_size = hash_alg->digest_size; params->hashstate = fsverity_prepare_hash_state(hash_alg, salt, salt_size); if (IS_ERR(params->hashstate)) { err = PTR_ERR(params->hashstate); params->hashstate = NULL; fsverity_err(inode, "Error %d preparing hash state", err); goto out_err; } /* * fs/verity/ directly assumes that the Merkle tree block size is a * power of 2 less than or equal to PAGE_SIZE. Another restriction * arises from the interaction between fs/verity/ and the filesystems * themselves: filesystems expect to be able to verify a single * filesystem block of data at a time.
Therefore, the Merkle tree block * size must also be less than or equal to the filesystem block size. * * The above are the only hard limitations, so in theory the Merkle tree * block size could be as small as twice the digest size. However, * that's not useful, and it would result in some unusually deep and * large Merkle trees. So we currently require that the Merkle tree * block size be at least 1024 bytes. That's small enough to test the * sub-page block case on systems with 4K pages, but not too small. */ if (log_blocksize < 10 || log_blocksize > PAGE_SHIFT || log_blocksize > inode->i_blkbits) { fsverity_warn(inode, "Unsupported log_blocksize: %u", log_blocksize); err = -EINVAL; goto out_err; } params->log_blocksize = log_blocksize; params->block_size = 1 << log_blocksize; params->log_blocks_per_page = PAGE_SHIFT - log_blocksize; params->blocks_per_page = 1 << params->log_blocks_per_page; if (WARN_ON_ONCE(!is_power_of_2(params->digest_size))) { err = -EINVAL; goto out_err; } if (params->block_size < 2 * params->digest_size) { fsverity_warn(inode, "Merkle tree block size (%u) too small for hash algorithm \"%s\"", params->block_size, hash_alg->name); err = -EINVAL; goto out_err; } params->log_digestsize = ilog2(params->digest_size); params->log_arity = log_blocksize - params->log_digestsize; params->hashes_per_block = 1 << params->log_arity; /* * Compute the number of levels in the Merkle tree and create a map from * level to the starting block of that level. Level 'num_levels - 1' is * the root and is stored first. Level 0 is the level directly "above" * the data blocks and is stored last. */ /* Compute number of levels and the number of blocks in each level */ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize; while (blocks > 1) { if (params->num_levels >= FS_VERITY_MAX_LEVELS) { fsverity_err(inode, "Too many levels in Merkle tree"); err = -EFBIG; goto out_err; } blocks = (blocks + params->hashes_per_block - 1) >> params->log_arity; blocks_in_level[params->num_levels++] = blocks; } /* Compute the starting block of each level */ offset = 0; for (level = (int)params->num_levels - 1; level >= 0; level--) { params->level_start[level] = offset; offset += blocks_in_level[level]; } /* * With block_size != PAGE_SIZE, an in-memory bitmap will need to be * allocated to track the "verified" status of hash blocks. Don't allow * this bitmap to get too large. For now, limit it to 1 MiB, which * limits the file size to about 4.4 TB with SHA-256 and 4K blocks. * * Together with the fact that the data, and thus also the Merkle tree, * cannot have more than ULONG_MAX pages, this implies that hash block * indices can always fit in an 'unsigned long'. But to be safe, we * explicitly check for that too. Note, this is only for hash block * indices; data block indices might not fit in an 'unsigned long'. */ if ((params->block_size != PAGE_SIZE && offset > 1 << 23) || offset > ULONG_MAX) { fsverity_err(inode, "Too many blocks in Merkle tree"); err = -EFBIG; goto out_err; } params->tree_size = offset << log_blocksize; params->tree_pages = PAGE_ALIGN(params->tree_size) >> PAGE_SHIFT; return 0; out_err: kfree(params->hashstate); memset(params, 0, sizeof(*params)); return err; } /* * Compute the file digest by hashing the fsverity_descriptor excluding the * builtin signature and with the sig_size field set to 0. 
*/ static int compute_file_digest(const struct fsverity_hash_alg *hash_alg, struct fsverity_descriptor *desc, u8 *file_digest) { __le32 sig_size = desc->sig_size; int err; desc->sig_size = 0; err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), file_digest); desc->sig_size = sig_size; return err; } /* * Create a new fsverity_info from the given fsverity_descriptor (with optional * appended builtin signature), and check the signature if present. The * fsverity_descriptor must have already undergone basic validation. */ struct fsverity_info *fsverity_create_info(const struct inode *inode, struct fsverity_descriptor *desc) { struct fsverity_info *vi; int err; vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL); if (!vi) return ERR_PTR(-ENOMEM); vi->inode = inode; err = fsverity_init_merkle_tree_params(&vi->tree_params, inode, desc->hash_algorithm, desc->log_blocksize, desc->salt, desc->salt_size); if (err) { fsverity_err(inode, "Error %d initializing Merkle tree parameters", err); goto fail; } memcpy(vi->root_hash, desc->root_hash, vi->tree_params.digest_size); err = compute_file_digest(vi->tree_params.hash_alg, desc, vi->file_digest); if (err) { fsverity_err(inode, "Error %d computing file digest", err); goto fail; } err = fsverity_verify_signature(vi, desc->signature, le32_to_cpu(desc->sig_size)); if (err) goto fail; if (vi->tree_params.block_size != PAGE_SIZE) { /* * When the Merkle tree block size and page size differ, we use * a bitmap to keep track of which hash blocks have been * verified. This bitmap must contain one bit per hash block, * including alignment to a page boundary at the end. * * Eventually, to support extremely large files in an efficient * way, it might be necessary to make pages of this bitmap * reclaimable. But for now, simply allocating the whole bitmap * is a simple solution that works well on the files on which * fsverity is realistically used. E.g., with SHA-256 and 4K * blocks, a 100MB file only needs a 24-byte bitmap, and the * bitmap for any file under 17GB fits in a 4K page. */ unsigned long num_bits = vi->tree_params.tree_pages << vi->tree_params.log_blocks_per_page; vi->hash_block_verified = kvcalloc(BITS_TO_LONGS(num_bits), sizeof(unsigned long), GFP_KERNEL); if (!vi->hash_block_verified) { err = -ENOMEM; goto fail; } } return vi; fail: fsverity_free_info(vi); return ERR_PTR(err); } void fsverity_set_info(struct inode *inode, struct fsverity_info *vi) { /* * Multiple tasks may race to set ->i_verity_info, so use * cmpxchg_release(). This pairs with the smp_load_acquire() in * fsverity_get_info(). I.e., here we publish ->i_verity_info with a * RELEASE barrier so that other tasks can ACQUIRE it. */ if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) { /* Lost the race, so free the fsverity_info we allocated. */ fsverity_free_info(vi); /* * Afterwards, the caller may access ->i_verity_info directly, * so make sure to ACQUIRE the winning fsverity_info. 
*/ (void)fsverity_get_info(inode); } } void fsverity_free_info(struct fsverity_info *vi) { if (!vi) return; kfree(vi->tree_params.hashstate); kvfree(vi->hash_block_verified); kmem_cache_free(fsverity_info_cachep, vi); } static bool validate_fsverity_descriptor(struct inode *inode, const struct fsverity_descriptor *desc, size_t desc_size) { if (desc_size < sizeof(*desc)) { fsverity_err(inode, "Unrecognized descriptor size: %zu bytes", desc_size); return false; } if (desc->version != 1) { fsverity_err(inode, "Unrecognized descriptor version: %u", desc->version); return false; } if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) { fsverity_err(inode, "Reserved bits set in descriptor"); return false; } if (desc->salt_size > sizeof(desc->salt)) { fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size); return false; } if (le64_to_cpu(desc->data_size) != inode->i_size) { fsverity_err(inode, "Wrong data_size: %llu (desc) != %lld (inode)", le64_to_cpu(desc->data_size), inode->i_size); return false; } if (le32_to_cpu(desc->sig_size) > desc_size - sizeof(*desc)) { fsverity_err(inode, "Signature overflows verity descriptor"); return false; } return true; } /* * Read the inode's fsverity_descriptor (with optional appended builtin * signature) from the filesystem, and do basic validation of it. */ int fsverity_get_descriptor(struct inode *inode, struct fsverity_descriptor **desc_ret) { int res; struct fsverity_descriptor *desc; res = inode->i_sb->s_vop->get_verity_descriptor(inode, NULL, 0); if (res < 0) { fsverity_err(inode, "Error %d getting verity descriptor size", res); return res; } if (res > FS_VERITY_MAX_DESCRIPTOR_SIZE) { fsverity_err(inode, "Verity descriptor is too large (%d bytes)", res); return -EMSGSIZE; } desc = kmalloc(res, GFP_KERNEL); if (!desc) return -ENOMEM; res = inode->i_sb->s_vop->get_verity_descriptor(inode, desc, res); if (res < 0) { fsverity_err(inode, "Error %d reading verity descriptor", res); kfree(desc); return res; } if (!validate_fsverity_descriptor(inode, desc, res)) { kfree(desc); return -EINVAL; } *desc_ret = desc; return 0; } /* Ensure the inode has an ->i_verity_info */ static int ensure_verity_info(struct inode *inode) { struct fsverity_info *vi = fsverity_get_info(inode); struct fsverity_descriptor *desc; int err; if (vi) return 0; err = fsverity_get_descriptor(inode, &desc); if (err) return err; vi = fsverity_create_info(inode, desc); if (IS_ERR(vi)) { err = PTR_ERR(vi); goto out_free_desc; } fsverity_set_info(inode, vi); err = 0; out_free_desc: kfree(desc); return err; } int __fsverity_file_open(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE) return -EPERM; return ensure_verity_info(inode); } EXPORT_SYMBOL_GPL(__fsverity_file_open); int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr) { if (attr->ia_valid & ATTR_SIZE) return -EPERM; return 0; } EXPORT_SYMBOL_GPL(__fsverity_prepare_setattr); void __fsverity_cleanup_inode(struct inode *inode) { fsverity_free_info(inode->i_verity_info); inode->i_verity_info = NULL; } EXPORT_SYMBOL_GPL(__fsverity_cleanup_inode); void __init fsverity_init_info_cache(void) { fsverity_info_cachep = KMEM_CACHE_USERCOPY( fsverity_info, SLAB_RECLAIM_ACCOUNT | SLAB_PANIC, file_digest); }
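/*
 * For context, a userspace sketch (hypothetical values) of the ioctl whose
 * output this open path consumes: FS_IOC_ENABLE_VERITY builds the Merkle
 * tree and the descriptor that fsverity_get_descriptor() later validates.
 * The file must be fully written and opened read-only first; SHA-256 and
 * 4096-byte blocks are just one supported choice.
 */
#include <sys/ioctl.h>
#include <linux/fsverity.h>

static int enable_verity(int fd)
{
	struct fsverity_enable_arg arg = {
		.version	= 1,
		.hash_algorithm	= FS_VERITY_HASH_ALG_SHA256,
		.block_size	= 4096,	/* must be supported by the filesystem */
	};

	return ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);	/* 0 on success */
}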
2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 
3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OV519 driver
 *
 * Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
 * Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
 *
 * This module is adapted from the ov51x-jpeg package, which itself
 * was adapted from the ov511 driver.
 *
 * Original copyright for the ov511 driver is:
 *
 * Copyright (c) 1999-2006 Mark W. McClelland
 * Support for OV519, OV8610 Copyright (c) 2003 Joerg Heckenbach
 * Many improvements by Bret Wallach <bwallac1@san.rr.com>
 * Color fixes by Orion Sky Lawlor <olawlor@acm.org> (2/26/2000)
 * OV7620 fixes by Charl P. Botha <cpbotha@ieee.org>
 * Changes by Claudio Matsuoka <claudio@conectiva.com>
 *
 * ov51x-jpeg original copyright is:
 *
 * Copyright (c) 2004-2007 Romain Beauxis <toots@rastageeks.org>
 * Support for OV7670 sensors was contributed by Sam Skipsey <aoanla@yahoo.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "ov519"

#include <linux/input.h>
#include "gspca.h"

/* The jpeg_hdr is used by w996Xcf only */
/* The CONEX_CAM define for jpeg.h needs renaming, now it's used here too */
#define CONEX_CAM
#include "jpeg.h"

MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("OV519 USB Camera Driver");
MODULE_LICENSE("GPL");

/* global parameters */
static int frame_rate;

/* Number of times to retry a failed I2C transaction. Increase this if you
 * are getting "Failed to read sensor ID..." */
static int i2c_detect_tries = 10;

/* ov519 device descriptor */
struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */

	struct v4l2_ctrl *jpegqual;
	struct v4l2_ctrl *freq;
	struct { /* h/vflip control cluster */
		struct v4l2_ctrl *hflip;
		struct v4l2_ctrl *vflip;
	};
	struct { /* autobrightness/brightness control cluster */
		struct v4l2_ctrl *autobright;
		struct v4l2_ctrl *brightness;
	};

	u8 revision;
	u8 packet_nr;

	char bridge;
#define BRIDGE_OV511		0
#define BRIDGE_OV511PLUS	1
#define BRIDGE_OV518		2
#define BRIDGE_OV518PLUS	3
#define BRIDGE_OV519		4	/* = ov530 */
#define BRIDGE_OVFX2		5
#define BRIDGE_W9968CF		6
#define BRIDGE_MASK		7

	char invert_led;
#define BRIDGE_INVERT_LED	8

	char snapshot_pressed;
	char snapshot_needs_reset;

	/* Determined by sensor type */
	u8 sif;

#define QUALITY_MIN 50
#define QUALITY_MAX 70
#define QUALITY_DEF 50

	u8 stopped;		/* Streaming is temporarily paused */
	u8 first_frame;

	u8 frame_rate;		/* current Framerate */
	u8 clockdiv;		/* clockdiv override */

	s8 sensor;		/* Type of image sensor chip (SEN_*) */

	u8 sensor_addr;
	u16 sensor_width;
	u16 sensor_height;
	s16 sensor_reg_cache[256];

	u8 jpeg_hdr[JPEG_HDR_SZ];
};
enum sensors {
	SEN_OV2610,
	SEN_OV2610AE,
	SEN_OV3610,
	SEN_OV6620,
	SEN_OV6630,
	SEN_OV66308AF,
	SEN_OV7610,
	SEN_OV7620,
	SEN_OV7620AE,
	SEN_OV7640,
	SEN_OV7648,
	SEN_OV7660,
	SEN_OV7670,
	SEN_OV76BE,
	SEN_OV8610,
	SEN_OV9600,
};

/* Note this is a bit of a hack, but the w9968cf driver needs the code for
   all the ov sensors which is already present here. When we have the time we
   really should move the sensor drivers to v4l2 sub drivers. */
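/*
 * Illustrative sketch (not part of the original file): how a probe-time
 * read could honor the i2c_detect_tries parameter above. ov51x_i2c_r()
 * is a hypothetical stand-in for the bridge-specific I2C read helper;
 * the real driver retries its sensor-ID reads in its own detection code.
 */
static int ov_read_with_retries(struct sd *sd, u8 reg)
{
	int i, rc = -EIO;	/* assumed default when every try fails */

	for (i = 0; i < i2c_detect_tries; i++) {
		rc = ov51x_i2c_r(sd, reg);	/* hypothetical helper */
		if (rc >= 0)
			break;			/* got a valid byte */
	}
	return rc;
}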
#include "w996Xcf.c"

/* table of the disabled controls */
struct ctrl_valid {
	unsigned int has_brightness:1;
	unsigned int has_contrast:1;
	unsigned int has_exposure:1;
	unsigned int has_autogain:1;
	unsigned int has_sat:1;
	unsigned int has_hvflip:1;
	unsigned int has_autobright:1;
	unsigned int has_freq:1;
};

static const struct ctrl_valid valid_controls[] = {
	[SEN_OV2610] = {
		.has_exposure = 1,
		.has_autogain = 1,
	},
	[SEN_OV2610AE] = {
		.has_exposure = 1,
		.has_autogain = 1,
	},
	[SEN_OV3610] = {
		/* No controls */
	},
	[SEN_OV6620] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV6630] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV66308AF] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV7610] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV7620] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV7620AE] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV7640] = {
		.has_brightness = 1,
		.has_sat = 1,
		.has_freq = 1,
	},
	[SEN_OV7648] = {
		.has_brightness = 1,
		.has_sat = 1,
		.has_freq = 1,
	},
	[SEN_OV7660] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_hvflip = 1,
		.has_freq = 1,
	},
	[SEN_OV7670] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_hvflip = 1,
		.has_freq = 1,
	},
	[SEN_OV76BE] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
		.has_freq = 1,
	},
	[SEN_OV8610] = {
		.has_brightness = 1,
		.has_contrast = 1,
		.has_sat = 1,
		.has_autobright = 1,
	},
	[SEN_OV9600] = {
		.has_exposure = 1,
		.has_autogain = 1,
	},
};

static const struct v4l2_pix_format ov519_vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 640, .sizeimage = 640 * 480 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};
static const struct v4l2_pix_format ov519_sif_mode[] = {
	{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 160, .sizeimage = 160 * 120 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 3},
	{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 176, .sizeimage = 176 * 144 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2},
	{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
	 .bytesperline = 352, .sizeimage = 352 * 288 * 3 / 8 + 590,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};

/* Note some of the sizeimage values for the ov511 / ov518 may seem
   larger than necessary, however they need to be this big as the ov511 /
   ov518 always fills the entire isoc frame, using 0 padding bytes when
   it doesn't have any data. So with low framerates the amount of data
   transferred can become quite large (libv4l will remove all the 0 padding
   in userspace). */
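/*
 * Illustrative sketch (not part of the original file): the JPEG modes
 * above budget roughly 3 bits per pixel of compressed data plus ~590
 * bytes of header slack. jpeg_mode_sizeimage() is a hypothetical helper
 * spelling out that arithmetic; the driver keeps the values inline.
 */
static u32 jpeg_mode_sizeimage(u32 width, u32 height)
{
	return width * height * 3 / 8 + 590;	/* worst-case JPEG budget */
}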
static const struct v4l2_pix_format ov518_vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 640, .sizeimage = 640 * 480 * 2,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};
static const struct v4l2_pix_format ov518_sif_mode[] = {
	{160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 160, .sizeimage = 70000,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 3},
	{176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 176, .sizeimage = 70000,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2},
	{352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
	 .bytesperline = 352, .sizeimage = 352 * 288 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};

static const struct v4l2_pix_format ov511_vga_mode[] = {
	{320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 640, .sizeimage = 640 * 480 * 2,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};
static const struct v4l2_pix_format ov511_sif_mode[] = {
	{160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 160, .sizeimage = 70000,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 3},
	{176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 176, .sizeimage = 70000,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1},
	{320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 320, .sizeimage = 320 * 240 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 2},
	{352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
	 .bytesperline = 352, .sizeimage = 352 * 288 * 3,
	 .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0},
};

static const struct v4l2_pix_format ovfx2_ov2610_mode[] = {
	{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 800, .sizeimage = 800 * 600,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1},
	{1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 1600, .sizeimage = 1600 * 1200,
	 .colorspace = V4L2_COLORSPACE_SRGB},
};
static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
	{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 640, .sizeimage = 640 * 480,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1},
	{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 800, .sizeimage = 800 * 600,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1},
	{1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 1024, .sizeimage = 1024 * 768,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1},
	{1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 1600, .sizeimage = 1600 * 1200,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0},
	{2048, 1536, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 2048, .sizeimage = 2048 * 1536,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0},
};
static const struct v4l2_pix_format ovfx2_ov9600_mode[] = {
	{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 640, .sizeimage = 640 * 480,
	 .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1},
	{1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
	 .bytesperline = 1280, .sizeimage = 1280 * 1024,
	 .colorspace = V4L2_COLORSPACE_SRGB},
};

/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE		0x30	/* 2 bytes wide w/ OV518(+) */
#define R51x_SYS_RESET		0x50	/* Reset type flags */
#define	OV511_RESET_OMNICE	0x08
#define R51x_SYS_INIT		0x53
#define R51x_SYS_SNAP		0x52
#define R51x_SYS_CUST_ID	0x5f
#define R51x_COMP_LUT_BEGIN	0x80

/* OV511 Camera interface register numbers */
#define R511_CAM_DELAY		0x10
#define R511_CAM_EDGE		0x11
#define R511_CAM_PXCNT		0x12
#define R511_CAM_LNCNT		0x13
#define R511_CAM_PXDIV		0x14
#define R511_CAM_LNDIV		0x15
#define R511_CAM_UV_EN		0x16
#define R511_CAM_LINE_MODE	0x17
#define R511_CAM_OPTS		0x18
#define R511_SNAP_FRAME		0x19
#define R511_SNAP_PXCNT		0x1a
#define R511_SNAP_LNCNT		0x1b
#define R511_SNAP_PXDIV		0x1c
#define R511_SNAP_LNDIV		0x1d
#define R511_SNAP_UV_EN		0x1e
#define R511_SNAP_OPTS		0x1f
#define R511_DRAM_FLOW_CTL	0x20
#define R511_FIFO_OPTS		0x31
#define R511_I2C_CTL		0x40
#define R511_SYS_LED_CTL	0x55	/* OV511+ only */
#define R511_COMP_EN		0x78
#define R511_COMP_LUT_EN	0x79

/* OV518 Camera interface register numbers */
#define R518_GPIO_OUT		0x56	/* OV518(+) only */
#define R518_GPIO_CTL		0x57	/* OV518(+) only */

/* OV519 Camera interface register numbers */
#define OV519_R10_H_SIZE	0x10
#define OV519_R11_V_SIZE	0x11
#define OV519_R12_X_OFFSETL	0x12
#define OV519_R13_X_OFFSETH	0x13
#define OV519_R14_Y_OFFSETL	0x14
#define OV519_R15_Y_OFFSETH	0x15
#define OV519_R16_DIVIDER	0x16
#define OV519_R20_DFR		0x20
#define OV519_R25_FORMAT	0x25

/* OV519 System Controller register numbers */
#define OV519_R51_RESET1	0x51
#define OV519_R54_EN_CLK1	0x54
#define OV519_R57_SNAPSHOT	0x57
#define OV519_GPIO_DATA_OUT0	0x71
#define OV519_GPIO_IO_CTRL0	0x72

/*#define OV511_ENDPOINT_ADDRESS 1	 * Isoc endpoint number */

/*
 * The FX2 chip does not give us a zero length read at end of frame.
 * It does, however, give a short read at the end of a frame, if
 * necessary, rather than run two frames together.
 *
 * By choosing the right bulk transfer size, we are guaranteed to always
 * get a short read for the last read of each frame. Frame sizes are
 * always a composite number (width * height, or a multiple) so if we
 * choose a prime number, we are guaranteed that the last read of a
 * frame will be short.
 *
 * But it isn't that easy: the 2.6 kernel requires a multiple of 4KB,
 * otherwise EOVERFLOW "babbling" errors occur. I have not been able
 * to figure out why. [PMiller]
 *
 * The constant (13 * 4096) is the largest "prime enough" number less
 * than 64KB.
 *
 * It isn't enough to know the number of bytes per frame, in case we
 * have data dropouts or buffer overruns (even though the FX2 double
 * buffers, there are some pretty strict real time constraints for
 * isochronous transfer for larger frame sizes).
 */
/*jfm: this value does not work for 800x600 - see isoc_init */
#define OVFX2_BULK_SIZE (13 * 4096)
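/*
 * Illustrative sketch (not part of the original file): a quick check of
 * the "short read" argument above. A frame terminates cleanly only when
 * its byte size is not an exact multiple of the bulk transfer size, so
 * the final bulk read comes back short. frame_ends_short() is a
 * hypothetical helper; e.g. 1600 * 1200 = 1920000 bytes, and
 * 1920000 % (13 * 4096) = 3072, so the last read of such a frame is short.
 */
static bool frame_ends_short(u32 width, u32 height)
{
	return (width * height) % OVFX2_BULK_SIZE != 0;
}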
/* I2C registers */
#define R51x_I2C_W_SID		0x41
#define R51x_I2C_SADDR_3	0x42
#define R51x_I2C_SADDR_2	0x43
#define R51x_I2C_R_SID		0x44
#define R51x_I2C_DATA		0x45
#define R518_I2C_CTL		0x47	/* OV518(+) only */
#define OVFX2_I2C_ADDR		0x00

/* I2C ADDRESSES */
#define OV7xx0_SID   0x42
#define OV_HIRES_SID 0x60		/* OV9xxx / OV2xxx / OV3xxx */
#define OV8xx0_SID   0xa0
#define OV6xx0_SID   0xc0

/* OV7610 registers */
#define OV7610_REG_GAIN		0x00	/* gain setting (5:0) */
#define OV7610_REG_BLUE		0x01	/* blue channel balance */
#define OV7610_REG_RED		0x02	/* red channel balance */
#define OV7610_REG_SAT		0x03	/* saturation */
#define OV8610_REG_HUE		0x04	/* 04 reserved */
#define OV7610_REG_CNT		0x05	/* Y contrast */
#define OV7610_REG_BRT		0x06	/* Y brightness */
#define OV7610_REG_COM_C	0x14	/* misc common regs */
#define OV7610_REG_ID_HIGH	0x1c	/* manufacturer ID MSB */
#define OV7610_REG_ID_LOW	0x1d	/* manufacturer ID LSB */
#define OV7610_REG_COM_I	0x29	/* misc settings */

/* OV7660 and OV7670 registers */
#define OV7670_R00_GAIN		0x00	/* Gain lower 8 bits (rest in vref) */
#define OV7670_R01_BLUE		0x01	/* blue gain */
#define OV7670_R02_RED		0x02	/* red gain */
#define OV7670_R03_VREF		0x03	/* Pieces of GAIN, VSTART, VSTOP */
#define OV7670_R04_COM1		0x04	/* Control 1 */
/*#define OV7670_R07_AECHH	0x07	 * AEC MS 5 bits */
#define OV7670_R0C_COM3		0x0c	/* Control 3 */
#define OV7670_R0D_COM4		0x0d	/* Control 4 */
#define OV7670_R0E_COM5		0x0e	/* All "reserved" */
#define OV7670_R0F_COM6		0x0f	/* Control 6 */
#define OV7670_R10_AECH		0x10	/* More bits of AEC value */
#define OV7670_R11_CLKRC	0x11	/* Clock control */
#define OV7670_R12_COM7		0x12	/* Control 7 */
#define   OV7670_COM7_FMT_VGA	 0x00
/*#define   OV7670_COM7_YUV	 0x00	 * YUV */
#define   OV7670_COM7_FMT_QVGA	 0x10	/* QVGA format */
#define   OV7670_COM7_FMT_MASK	 0x38
#define   OV7670_COM7_RESET	 0x80	/* Register reset */
#define OV7670_R13_COM8		0x13	/* Control 8 */
#define   OV7670_COM8_AEC	 0x01	/* Auto exposure enable */
#define   OV7670_COM8_AWB	 0x02	/* White balance enable */
#define   OV7670_COM8_AGC	 0x04	/* Auto gain enable */
#define   OV7670_COM8_BFILT	 0x20	/* Band filter enable */
#define   OV7670_COM8_AECSTEP	 0x40	/* Unlimited AEC step size */
#define   OV7670_COM8_FASTAEC	 0x80	/* Enable fast AGC/AEC */
#define OV7670_R14_COM9		0x14	/* Control 9 - gain ceiling */
#define OV7670_R15_COM10	0x15	/* Control 10 */
#define OV7670_R17_HSTART	0x17	/* Horiz start high bits */
#define OV7670_R18_HSTOP	0x18	/* Horiz stop high bits */
#define OV7670_R19_VSTART	0x19	/* Vert start high bits */
#define OV7670_R1A_VSTOP	0x1a	/* Vert stop high bits */
#define OV7670_R1E_MVFP		0x1e	/* Mirror / vflip */
#define   OV7670_MVFP_VFLIP	 0x10	/* vertical flip */
#define   OV7670_MVFP_MIRROR	 0x20	/* Mirror image */
#define OV7670_R24_AEW		0x24	/* AGC upper limit */
#define OV7670_R25_AEB		0x25	/* AGC lower limit */
#define OV7670_R26_VPT		0x26	/* AGC/AEC fast mode op region */
#define OV7670_R32_HREF		0x32	/* HREF pieces */
#define OV7670_R3A_TSLB		0x3a	/* lots of stuff */
#define OV7670_R3B_COM11	0x3b	/* Control 11 */
#define   OV7670_COM11_EXP	 0x02
#define   OV7670_COM11_HZAUTO	 0x10	/* Auto detect 50/60 Hz */
#define OV7670_R3C_COM12	0x3c	/* Control 12 */
#define OV7670_R3D_COM13	0x3d	/* Control 13 */
#define   OV7670_COM13_GAMMA	 0x80	/* Gamma enable */
#define   OV7670_COM13_UVSAT	 0x40	/* UV saturation auto adjustment */
#define OV7670_R3E_COM14	0x3e	/* Control 14 */
#define OV7670_R3F_EDGE		0x3f	/* Edge enhancement factor */
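/*
 * Illustrative sketch (not part of the original file): the COM8 bits
 * above are meant to be OR-ed together when programming register 0x13,
 * which is what the sensor init tables below do. com8_agc_aec_awb is a
 * hypothetical example value, not a setting taken from the driver.
 */
static const u8 com8_agc_aec_awb = OV7670_COM8_FASTAEC | OV7670_COM8_AECSTEP |
				   OV7670_COM8_BFILT | OV7670_COM8_AGC |
				   OV7670_COM8_AWB | OV7670_COM8_AEC;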
#define OV7670_R40_COM15	0x40	/* Control 15 */
/*#define   OV7670_COM15_R00FF	 0xc0	 *	00 to FF */
#define OV7670_R41_COM16	0x41	/* Control 16 */
#define   OV7670_COM16_AWBGAIN	 0x08	/* AWB gain enable */
/* end of ov7660 common registers */
#define OV7670_R55_BRIGHT	0x55	/* Brightness */
#define OV7670_R56_CONTRAS	0x56	/* Contrast control */
#define OV7670_R69_GFIX		0x69	/* Fix gain control */
/*#define OV7670_R8C_RGB444	0x8c	 * RGB 444 control */
#define OV7670_R9F_HAECC1	0x9f	/* Hist AEC/AGC control 1 */
#define OV7670_RA0_HAECC2	0xa0	/* Hist AEC/AGC control 2 */
#define OV7670_RA5_BD50MAX	0xa5	/* 50hz banding step limit */
#define OV7670_RA6_HAECC3	0xa6	/* Hist AEC/AGC control 3 */
#define OV7670_RA7_HAECC4	0xa7	/* Hist AEC/AGC control 4 */
#define OV7670_RA8_HAECC5	0xa8	/* Hist AEC/AGC control 5 */
#define OV7670_RA9_HAECC6	0xa9	/* Hist AEC/AGC control 6 */
#define OV7670_RAA_HAECC7	0xaa	/* Hist AEC/AGC control 7 */
#define OV7670_RAB_BD60MAX	0xab	/* 60hz banding step limit */

struct ov_regvals {
	u8 reg;
	u8 val;
};
struct ov_i2c_regvals {
	u8 reg;
	u8 val;
};

/* Settings for OV2610 camera chip */
static const struct ov_i2c_regvals norm_2610[] = {
	{ 0x12, 0x80 },	/* reset */
};

static const struct ov_i2c_regvals norm_2610ae[] = {
	{0x12, 0x80},	/* reset */
	{0x13, 0xcd},
	{0x09, 0x01},
	{0x0d, 0x00},
	{0x11, 0x80},
	{0x12, 0x20},	/* 1600x1200 */
	{0x33, 0x0c},
	{0x35, 0x90},
	{0x36, 0x37},	/* ms-win traces */
	{0x11, 0x83},	/* clock / 3 ? */
	{0x2d, 0x00},	/* 60 Hz filter */
	{0x24, 0xb0},	/* normal colors */
	{0x25, 0x90},
	{0x10, 0x43},
};

static const struct ov_i2c_regvals norm_3620b[] = {
	/*
	 * From the datasheet: "Note that after writing to register COMH
	 * (0x12) to change the sensor mode, registers related to the
	 * sensor's cropping window will be reset back to their default
	 * values."
	 *
	 * "wait 4096 external clock ... to make sure the sensor is
	 * stable and ready to access registers" i.e. 160us at 24MHz
	 */
	{ 0x12, 0x80 }, /* COMH reset */
	{ 0x12, 0x00 }, /* QXGA, master */

	/*
	 * 11 CLKRC "Clock Rate Control"
	 * [7] internal frequency doublers: on
	 * [6] video port mode: master
	 * [5:0] clock divider: 1
	 */
	{ 0x11, 0x80 },

	/*
	 * 13 COMI "Common Control I" = 192 (0xC0) 11000000
	 * COMI[7] "AEC speed selection" = 1 (0x01) 1....... "Faster AEC correction"
	 * COMI[6] "AEC speed step selection" = 1 (0x01) .1...... "Big steps, fast"
	 * COMI[5] "Banding filter on off" = 0 (0x00) ..0..... "Off"
	 * COMI[4] "Banding filter option" = 0 (0x00) ...0.... "Main clock is 48 MHz and the PLL is ON"
	 * COMI[3] "Reserved" = 0 (0x00) ....0...
	 * COMI[2] "AGC auto manual control selection" = 0 (0x00) .....0.. "Manual"
	 * COMI[1] "AWB auto manual control selection" = 0 (0x00) ......0. "Manual"
	 * COMI[0] "Exposure control" = 0 (0x00) .......0 "Manual"
	 */
	{ 0x13, 0xc0 },

	/*
	 * 09 COMC "Common Control C" = 8 (0x08) 00001000
	 * COMC[7:5] "Reserved" = 0 (0x00) 000.....
	 * COMC[4] "Sleep Mode Enable" = 0 (0x00) ...0.... "Normal mode"
	 * COMC[3:2] "Sensor sampling reset timing selection" = 2 (0x02) ....10.. "Longer reset time"
	 * COMC[1:0] "Output drive current select" = 0 (0x00) ......00 "Weakest"
	 */
	{ 0x09, 0x08 },

	/*
	 * 0C COMD "Common Control D" = 8 (0x08) 00001000
	 * COMD[7] "Reserved" = 0 (0x00) 0.......
	 * COMD[6] "Swap MSB and LSB at the output port" = 0 (0x00) .0...... "False"
	 * COMD[5:3] "Reserved" = 1 (0x01) ..001...
	 * COMD[2] "Output Average On Off" = 0 (0x00) .....0.. "Output Normal"
	 * COMD[1] "Sensor precharge voltage selection" = 0 (0x00) ......0. "Selects internal reference precharge voltage"
	 * COMD[0] "Snapshot option" = 0 (0x00) .......0 "Enable live video output after snapshot sequence"
	 */
	{ 0x0c, 0x08 },

	/*
	 * 0D COME "Common Control E" = 161 (0xA1) 10100001
	 * COME[7] "Output average option" = 1 (0x01) 1....... "Output average of 4 pixels"
	 * COME[6] "Anti-blooming control" = 0 (0x00) .0...... "Off"
	 * COME[5:3] "Reserved" = 4 (0x04) ..100...
	 * COME[2] "Clock output power down pin status" = 0 (0x00) .....0.. "Tri-state data output pin on power down"
	 * COME[1] "Data output pin status selection at power down" = 0 (0x00) ......0. "Tri-state VSYNC, PCLK, HREF, and CHSYNC pins on power down"
	 * COME[0] "Auto zero circuit select" = 1 (0x01) .......1 "On"
	 */
	{ 0x0d, 0xa1 },

	/*
	 * 0E COMF "Common Control F" = 112 (0x70) 01110000
	 * COMF[7] "System clock selection" = 0 (0x00) 0....... "Use 24 MHz system clock"
	 * COMF[6:4] "Reserved" = 7 (0x07) .111....
	 * COMF[3] "Manual auto negative offset canceling selection" = 0 (0x00) ....0... "Auto detect negative offset and cancel it"
	 * COMF[2:0] "Reserved" = 0 (0x00) .....000
	 */
	{ 0x0e, 0x70 },

	/*
	 * 0F COMG "Common Control G" = 66 (0x42) 01000010
	 * COMG[7] "Optical black output selection" = 0 (0x00) 0....... "Disable"
	 * COMG[6] "Black level calibrate selection" = 1 (0x01) .1...... "Use optical black pixels to calibrate"
	 * COMG[5:4] "Reserved" = 0 (0x00) ..00....
	 * COMG[3] "Channel offset adjustment" = 0 (0x00) ....0... "Disable offset adjustment"
	 * COMG[2] "ADC black level calibration option" = 0 (0x00) .....0.. "Use B/G line and G/R line to calibrate each channel's black level"
	 * COMG[1] "Reserved" = 1 (0x01) ......1.
	 * COMG[0] "ADC black level calibration enable" = 0 (0x00) .......0 "Disable"
	 */
	{ 0x0f, 0x42 },

	/*
	 * 14 COMJ "Common Control J" = 198 (0xC6) 11000110
	 * COMJ[7:6] "AGC gain ceiling" = 3 (0x03) 11...... "8x"
	 * COMJ[5:4] "Reserved" = 0 (0x00) ..00....
	 * COMJ[3] "Auto banding filter" = 0 (0x00) ....0... "Banding filter is always on off depending on COMI[5] setting"
	 * COMJ[2] "VSYNC drop option" = 1 (0x01) .....1.. "SYNC is dropped if frame data is dropped"
	 * COMJ[1] "Frame data drop" = 1 (0x01) ......1. "Drop frame data if exposure is not within tolerance. In AEC mode, data is normally dropped when data is out of range."
	 * COMJ[0] "Reserved" = 0 (0x00) .......0
	 */
	{ 0x14, 0xc6 },

	/*
	 * 15 COMK "Common Control K" = 2 (0x02) 00000010
	 * COMK[7] "CHSYNC pin output swap" = 0 (0x00) 0....... "CHSYNC"
	 * COMK[6] "HREF pin output swap" = 0 (0x00) .0...... "HREF"
	 * COMK[5] "PCLK output selection" = 0 (0x00) ..0..... "PCLK always output"
	 * COMK[4] "PCLK edge selection" = 0 (0x00) ...0.... "Data valid on falling edge"
	 * COMK[3] "HREF output polarity" = 0 (0x00) ....0... "positive"
	 * COMK[2] "Reserved" = 0 (0x00) .....0..
	 * COMK[1] "VSYNC polarity" = 1 (0x01) ......1. "negative"
	 * COMK[0] "HSYNC polarity" = 0 (0x00) .......0 "positive"
	 */
	{ 0x15, 0x02 },

	/*
	 * 33 CHLF "Current Control" = 9 (0x09) 00001001
	 * CHLF[7:6] "Sensor current control" = 0 (0x00) 00......
	 * CHLF[5] "Sensor current range control" = 0 (0x00) ..0..... "normal range"
	 * CHLF[4] "Sensor current" = 0 (0x00) ...0.... "normal current"
	 * CHLF[3] "Sensor buffer current control" = 1 (0x01) ....1... "half current"
	 * CHLF[2] "Column buffer current control" = 0 (0x00) .....0.. "normal current"
	 * CHLF[1] "Analog DSP current control" = 0 (0x00) ......0. "normal current"
	 * CHLF[1] "ADC current control" = 0 (0x00) ......0. "normal current"
	 */
	{ 0x33, 0x09 },

	/*
	 * 34 VBLM "Blooming Control" = 80 (0x50) 01010000
	 * VBLM[7] "Hard soft reset switch" = 0 (0x00) 0....... "Hard reset"
	 * VBLM[6:4] "Blooming voltage selection" = 5 (0x05) .101....
	 * VBLM[3:0] "Sensor current control" = 0 (0x00) ....0000
	 */
	{ 0x34, 0x50 },

	/*
	 * 36 VCHG "Sensor Precharge Voltage Control" = 0 (0x00) 00000000
	 * VCHG[7] "Reserved" = 0 (0x00) 0.......
	 * VCHG[6:4] "Sensor precharge voltage control" = 0 (0x00) .000....
	 * VCHG[3:0] "Sensor array common reference" = 0 (0x00) ....0000
	 */
	{ 0x36, 0x00 },

	/*
	 * 37 ADC "ADC Reference Control" = 4 (0x04) 00000100
	 * ADC[7:4] "Reserved" = 0 (0x00) 0000....
	 * ADC[3] "ADC input signal range" = 0 (0x00) ....0... "Input signal 1.0x"
	 * ADC[2:0] "ADC range control" = 4 (0x04) .....100
	 */
	{ 0x37, 0x04 },

	/*
	 * 38 ACOM "Analog Common Ground" = 82 (0x52) 01010010
	 * ACOM[7] "Analog gain control" = 0 (0x00) 0....... "Gain 1x"
	 * ACOM[6] "Analog black level calibration" = 1 (0x01) .1...... "On"
	 * ACOM[5:0] "Reserved" = 18 (0x12) ..010010
	 */
	{ 0x38, 0x52 },

	/*
	 * 3A FREFA "Internal Reference Adjustment" = 0 (0x00) 00000000
	 * FREFA[7:0] "Range" = 0 (0x00) 00000000
	 */
	{ 0x3a, 0x00 },

	/*
	 * 3C FVOPT "Internal Reference Adjustment" = 31 (0x1F) 00011111
	 * FVOPT[7:0] "Range" = 31 (0x1F) 00011111
	 */
	{ 0x3c, 0x1f },

	/* 44 Undocumented = 0 (0x00) 00000000
	 * 44[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x44, 0x00 },
	/* 40 Undocumented = 0 (0x00) 00000000
	 * 40[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x40, 0x00 },
	/* 41 Undocumented = 0 (0x00) 00000000
	 * 41[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x41, 0x00 },
	/* 42 Undocumented = 0 (0x00) 00000000
	 * 42[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x42, 0x00 },
	/* 43 Undocumented = 0 (0x00) 00000000
	 * 43[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x43, 0x00 },
	/* 45 Undocumented = 128 (0x80) 10000000
	 * 45[7:0] "It's a secret" = 128 (0x80) 10000000 */
	{ 0x45, 0x80 },
	/* 48 Undocumented = 192 (0xC0) 11000000
	 * 48[7:0] "It's a secret" = 192 (0xC0) 11000000 */
	{ 0x48, 0xc0 },
	/* 49 Undocumented = 25 (0x19) 00011001
	 * 49[7:0] "It's a secret" = 25 (0x19) 00011001 */
	{ 0x49, 0x19 },
	/* 4B Undocumented = 128 (0x80) 10000000
	 * 4B[7:0] "It's a secret" = 128 (0x80) 10000000 */
	{ 0x4b, 0x80 },
	/* 4D Undocumented = 196 (0xC4) 11000100
	 * 4D[7:0] "It's a secret" = 196 (0xC4) 11000100 */
	{ 0x4d, 0xc4 },

	/*
	 * 35 VREF "Reference Voltage Control" = 76 (0x4c) 01001100
	 * VREF[7:5] "Column high reference control" = 2 (0x02) 010..... "higher voltage"
	 * VREF[4:2] "Column low reference control" = 3 (0x03) ...011.. "Highest voltage"
	 * VREF[1:0] "Reserved" = 0 (0x00) ......00
	 */
	{ 0x35, 0x4c },

	/* 3D Undocumented = 0 (0x00) 00000000
	 * 3D[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x3d, 0x00 },
	/* 3E Undocumented = 0 (0x00) 00000000
	 * 3E[7:0] "It's a secret" = 0 (0x00) 00000000 */
	{ 0x3e, 0x00 },

	/*
	 * 3B FREFB "Internal Reference Adjustment" = 24 (0x18) 00011000
	 * FREFB[7:0] "Range" = 24 (0x18) 00011000
	 */
	{ 0x3b, 0x18 },

	/*
	 * 33 CHLF "Current Control" = 25 (0x19) 00011001
	 * CHLF[7:6] "Sensor current control" = 0 (0x00) 00......
	 * CHLF[5] "Sensor current range control" = 0 (0x00) ..0..... "normal range"
	 * CHLF[4] "Sensor current" = 1 (0x01) ...1.... "double current"
	 * CHLF[3] "Sensor buffer current control" = 1 (0x01) ....1... "half current"
	 * CHLF[2] "Column buffer current control" = 0 (0x00) .....0.. "normal current"
	 * CHLF[1] "Analog DSP current control" = 0 (0x00) ......0. "normal current"
	 * CHLF[1] "ADC current control" = 0 (0x00) ......0. "normal current"
	 */
	{ 0x33, 0x19 },

	/*
	 * 34 VBLM "Blooming Control" = 90 (0x5A) 01011010
	 * VBLM[7] "Hard soft reset switch" = 0 (0x00) 0....... "Hard reset"
	 * VBLM[6:4] "Blooming voltage selection" = 5 (0x05) .101....
	 * VBLM[3:0] "Sensor current control" = 10 (0x0A) ....1010
	 */
	{ 0x34, 0x5a },

	/*
	 * 3B FREFB "Internal Reference Adjustment" = 0 (0x00) 00000000
	 * FREFB[7:0] "Range" = 0 (0x00) 00000000
	 */
	{ 0x3b, 0x00 },

	/*
	 * 33 CHLF "Current Control" = 9 (0x09) 00001001
	 * CHLF[7:6] "Sensor current control" = 0 (0x00) 00......
	 * CHLF[5] "Sensor current range control" = 0 (0x00) ..0..... "normal range"
	 * CHLF[4] "Sensor current" = 0 (0x00) ...0.... "normal current"
	 * CHLF[3] "Sensor buffer current control" = 1 (0x01) ....1... "half current"
	 * CHLF[2] "Column buffer current control" = 0 (0x00) .....0.. "normal current"
	 * CHLF[1] "Analog DSP current control" = 0 (0x00) ......0. "normal current"
	 * CHLF[1] "ADC current control" = 0 (0x00) ......0. "normal current"
	 */
	{ 0x33, 0x09 },

	/*
	 * 34 VBLM "Blooming Control" = 80 (0x50) 01010000
	 * VBLM[7] "Hard soft reset switch" = 0 (0x00) 0....... "Hard reset"
	 * VBLM[6:4] "Blooming voltage selection" = 5 (0x05) .101....
	 * VBLM[3:0] "Sensor current control" = 0 (0x00) ....0000
	 */
	{ 0x34, 0x50 },

	/*
	 * 12 COMH "Common Control H" = 64 (0x40) 01000000
	 * COMH[7] "SRST" = 0 (0x00) 0....... "No-op"
	 * COMH[6:4] "Resolution selection" = 4 (0x04) .100.... "XGA"
	 * COMH[3] "Master slave selection" = 0 (0x00) ....0... "Master mode"
	 * COMH[2] "Internal B/R channel option" = 0 (0x00) .....0.. "B/R use same channel"
	 * COMH[1] "Color bar test pattern" = 0 (0x00) ......0. "Off"
	 * COMH[0] "Reserved" = 0 (0x00) .......0
	 */
	{ 0x12, 0x40 },

	/*
	 * 17 HREFST "Horizontal window start" = 31 (0x1F) 00011111
	 * HREFST[7:0] "Horizontal window start, 8 MSBs" = 31 (0x1F) 00011111
	 */
	{ 0x17, 0x1f },
	/*
	 * 18 HREFEND "Horizontal window end" = 95 (0x5F) 01011111
	 * HREFEND[7:0] "Horizontal Window End, 8 MSBs" = 95 (0x5F) 01011111
	 */
	{ 0x18, 0x5f },
	/*
	 * 19 VSTRT "Vertical window start" = 0 (0x00) 00000000
	 * VSTRT[7:0] "Vertical Window Start, 8 MSBs" = 0 (0x00) 00000000
	 */
	{ 0x19, 0x00 },
	/*
	 * 1A VEND "Vertical window end" = 96 (0x60) 01100000
	 * VEND[7:0] "Vertical Window End, 8 MSBs" = 96 (0x60) 01100000
	 */
	{ 0x1a, 0x60 },

	/*
	 * 32 COMM "Common Control M" = 18 (0x12) 00010010
	 * COMM[7:6] "Pixel clock divide option" = 0 (0x00) 00...... "/1"
	 * COMM[5:3] "Horizontal window end position, 3 LSBs" = 2 (0x02) ..010...
	 * COMM[2:0] "Horizontal window start position, 3 LSBs" = 2 (0x02) .....010
	 */
	{ 0x32, 0x12 },

	/*
	 * 03 COMA "Common Control A" = 74 (0x4A) 01001010
	 * COMA[7:4] "AWB Update Threshold" = 4 (0x04) 0100....
	 * COMA[3:2] "Vertical window end line control 2 LSBs" = 2 (0x02) ....10..
	 * COMA[1:0] "Vertical window start line control 2 LSBs" = 2 (0x02) ......10
	 */
	{ 0x03, 0x4a },

	/*
	 * 11 CLKRC "Clock Rate Control" = 128 (0x80) 10000000
	 * CLKRC[7] "Internal frequency doublers on off selection" = 1 (0x01) 1....... "On"
	 * CLKRC[6] "Digital video master slave selection" = 0 (0x00) .0...... "Master mode, sensor provides PCLK"
	 * CLKRC[5:0] "Clock divider { CLK = PCLK/(1+CLKRC[5:0]) }" = 0 (0x00) ..000000
	 */
	{ 0x11, 0x80 },

	/*
	 * 12 COMH "Common Control H" = 0 (0x00) 00000000
	 * COMH[7] "SRST" = 0 (0x00) 0....... "No-op"
	 * COMH[6:4] "Resolution selection" = 0 (0x00) .000.... "QXGA"
	 * COMH[3] "Master slave selection" = 0 (0x00) ....0... "Master mode"
	 * COMH[2] "Internal B/R channel option" = 0 (0x00) .....0.. "B/R use same channel"
	 * COMH[1] "Color bar test pattern" = 0 (0x00) ......0. "Off"
	 * COMH[0] "Reserved" = 0 (0x00) .......0
	 */
	{ 0x12, 0x00 },

	/*
	 * 12 COMH "Common Control H" = 64 (0x40) 01000000
	 * COMH[7] "SRST" = 0 (0x00) 0....... "No-op"
	 * COMH[6:4] "Resolution selection" = 4 (0x04) .100.... "XGA"
	 * COMH[3] "Master slave selection" = 0 (0x00) ....0... "Master mode"
	 * COMH[2] "Internal B/R channel option" = 0 (0x00) .....0.. "B/R use same channel"
	 * COMH[1] "Color bar test pattern" = 0 (0x00) ......0. "Off"
	 * COMH[0] "Reserved" = 0 (0x00) .......0
	 */
	{ 0x12, 0x40 },

	/*
	 * 17 HREFST "Horizontal window start" = 31 (0x1F) 00011111
	 * HREFST[7:0] "Horizontal window start, 8 MSBs" = 31 (0x1F) 00011111
	 */
	{ 0x17, 0x1f },
	/*
	 * 18 HREFEND "Horizontal window end" = 95 (0x5F) 01011111
	 * HREFEND[7:0] "Horizontal Window End, 8 MSBs" = 95 (0x5F) 01011111
	 */
	{ 0x18, 0x5f },
	/*
	 * 19 VSTRT "Vertical window start" = 0 (0x00) 00000000
	 * VSTRT[7:0] "Vertical Window Start, 8 MSBs" = 0 (0x00) 00000000
	 */
	{ 0x19, 0x00 },
	/*
	 * 1A VEND "Vertical window end" = 96 (0x60) 01100000
	 * VEND[7:0] "Vertical Window End, 8 MSBs" = 96 (0x60) 01100000
	 */
	{ 0x1a, 0x60 },

	/*
	 * 32 COMM "Common Control M" = 18 (0x12) 00010010
	 * COMM[7:6] "Pixel clock divide option" = 0 (0x00) 00...... "/1"
	 * COMM[5:3] "Horizontal window end position, 3 LSBs" = 2 (0x02) ..010...
	 * COMM[2:0] "Horizontal window start position, 3 LSBs" = 2 (0x02) .....010
	 */
	{ 0x32, 0x12 },

	/*
	 * 03 COMA "Common Control A" = 74 (0x4A) 01001010
	 * COMA[7:4] "AWB Update Threshold" = 4 (0x04) 0100....
	 * COMA[3:2] "Vertical window end line control 2 LSBs" = 2 (0x02) ....10..
	 * COMA[1:0] "Vertical window start line control 2 LSBs" = 2 (0x02) ......10
	 */
	{ 0x03, 0x4a },

	/*
	 * 02 RED "Red Gain Control" = 175 (0xAF) 10101111
	 * RED[7] "Action" = 1 (0x01) 1....... "gain = 1/(1+bitrev([6:0]))"
	 * RED[6:0] "Value" = 47 (0x2F) .0101111
	 */
	{ 0x02, 0xaf },

	/*
	 * 2D ADDVSL "VSYNC Pulse Width" = 210 (0xD2) 11010010
	 * ADDVSL[7:0] "VSYNC pulse width, LSB" = 210 (0xD2) 11010010
	 */
	{ 0x2d, 0xd2 },

	/*
	 * 00 GAIN = 24 (0x18) 00011000
	 * GAIN[7:6] "Reserved" = 0 (0x00) 00......
	 * GAIN[5] "Double" = 0 (0x00) ..0..... "False"
	 * GAIN[4] "Double" = 1 (0x01) ...1.... "True"
	 * GAIN[3:0] "Range" = 8 (0x08) ....1000
	 */
	{ 0x00, 0x18 },

	/*
	 * 01 BLUE "Blue Gain Control" = 240 (0xF0) 11110000
	 * BLUE[7] "Action" = 1 (0x01) 1....... "gain = 1/(1+bitrev([6:0]))"
	 * BLUE[6:0] "Value" = 112 (0x70) .1110000
	 */
	{ 0x01, 0xf0 },

	/*
	 * 10 AEC "Automatic Exposure Control" = 10 (0x0A) 00001010
	 * AEC[7:0] "Automatic Exposure Control, 8 MSBs" = 10 (0x0A) 00001010
	 */
	{ 0x10, 0x0a },

	{ 0xe1, 0x67 }, { 0xe3, 0x03 }, { 0xe4, 0x26 },
	{ 0xe5, 0x3e }, { 0xf8, 0x01 }, { 0xff, 0x01 },
};

static const struct ov_i2c_regvals norm_6x20[] = {
	{ 0x12, 0x80 },	/* reset */
	{ 0x11, 0x01 },
	{ 0x03, 0x60 },
	{ 0x05, 0x7f },	/* For when autoadjust is off */
	{ 0x07, 0xa8 },
	/* The ratio of 0x0c and 0x0d controls the white point */
	{ 0x0c, 0x24 },
	{ 0x0d, 0x24 },
	{ 0x0f, 0x15 },	/* COMS */
	{ 0x10, 0x75 },	/* AEC Exposure time */
	{ 0x12, 0x24 },	/* Enable AGC */
	{ 0x14, 0x04 },
	/* 0x16: 0x06 helps frame stability with moving objects */
	{ 0x16, 0x06 },
/*	{ 0x20, 0x30 },	 * Aperture correction enable */
	{ 0x26, 0xb2 },	/* BLC enable */
	/* 0x28: 0x05 Selects RGB format if RGB on */
	{ 0x28, 0x05 },
	{ 0x2a, 0x04 },	/* Disable framerate adjust */
/*	{ 0x2b, 0xac },	 * Framerate; Set 2a[7] first */
	{ 0x2d, 0x85 },
	{ 0x33, 0xa0 },	/* Color Processing Parameter */
	{ 0x34, 0xd2 },	/* Max A/D range */
	{ 0x38, 0x8b },
	{ 0x39, 0x40 },
	{ 0x3c, 0x39 },	/* Enable AEC mode changing */
	{ 0x3c, 0x3c },	/* Change AEC mode */
	{ 0x3c, 0x24 },	/* Disable AEC mode changing */
	{ 0x3d, 0x80 },
	/* These next two registers (0x4a, 0x4b) are undocumented.
	 * They control the color balance */
	{ 0x4a, 0x80 },
	{ 0x4b, 0x80 },
	{ 0x4d, 0xd2 },	/* This reduces noise a bit */
	{ 0x4e, 0xc1 },
	{ 0x4f, 0x04 },
	/* Do 50-53 have any effect? */
	/* Toggle 0x12[2] off and on here? */
};

static const struct ov_i2c_regvals norm_6x30[] = {
	{ 0x12, 0x80 },	/* Reset */
	{ 0x00, 0x1f },	/* Gain */
	{ 0x01, 0x99 },	/* Blue gain */
	{ 0x02, 0x7c },	/* Red gain */
	{ 0x03, 0xc0 },	/* Saturation */
	{ 0x05, 0x0a },	/* Contrast */
	{ 0x06, 0x95 },	/* Brightness */
	{ 0x07, 0x2d },	/* Sharpness */
	{ 0x0c, 0x20 },
	{ 0x0d, 0x20 },
	{ 0x0e, 0xa0 },	/* Was 0x20, bit7 enables a 2x gain which we need */
	{ 0x0f, 0x05 },
	{ 0x10, 0x9a },
	{ 0x11, 0x00 },	/* Pixel clock = fastest */
	{ 0x12, 0x24 },	/* Enable AGC and AWB */
	{ 0x13, 0x21 },
	{ 0x14, 0x80 },
	{ 0x15, 0x01 },
	{ 0x16, 0x03 },
	{ 0x17, 0x38 },
	{ 0x18, 0xea },
	{ 0x19, 0x04 },
	{ 0x1a, 0x93 },
	{ 0x1b, 0x00 },
	{ 0x1e, 0xc4 },
	{ 0x1f, 0x04 },
	{ 0x20, 0x20 },
	{ 0x21, 0x10 },
	{ 0x22, 0x88 },
	{ 0x23, 0xc0 },	/* Crystal circuit power level */
	{ 0x25, 0x9a },	/* Increase AEC black ratio */
	{ 0x26, 0xb2 },	/* BLC enable */
	{ 0x27, 0xa2 },
	{ 0x28, 0x00 },
	{ 0x29, 0x00 },
	{ 0x2a, 0x84 },	/* 60 Hz power */
	{ 0x2b, 0xa8 },	/* 60 Hz power */
	{ 0x2c, 0xa0 },
	{ 0x2d, 0x95 },	/* Enable auto-brightness */
	{ 0x2e, 0x88 },
	{ 0x33, 0x26 },
	{ 0x34, 0x03 },
	{ 0x36, 0x8f },
	{ 0x37, 0x80 },
	{ 0x38, 0x83 },
	{ 0x39, 0x80 },
	{ 0x3a, 0x0f },
	{ 0x3b, 0x3c },
	{ 0x3c, 0x1a },
	{ 0x3d, 0x80 },
	{ 0x3e, 0x80 },
	{ 0x3f, 0x0e },
	{ 0x40, 0x00 },	/* White bal */
	{ 0x41, 0x00 },	/* White bal */
	{ 0x42, 0x80 },
	{ 0x43, 0x3f },	/* White bal */
	{ 0x44, 0x80 },
	{ 0x45, 0x20 },
	{ 0x46, 0x20 },
	{ 0x47, 0x80 },
	{ 0x48, 0x7f },
	{ 0x49, 0x00 },
	{ 0x4a, 0x00 },
	{ 0x4b, 0x80 },
	{ 0x4c, 0xd0 },
	{ 0x4d, 0x10 },	/* U = 0.563u, V = 0.714v */
	{ 0x4e, 0x40 },
	{ 0x4f, 0x07 },	/* UV avg., col. killer: max */
	{ 0x50, 0xff },
	{ 0x54, 0x23 },	/* Max AGC gain: 18dB */
	{ 0x55, 0xff },
	{ 0x56, 0x12 },
	{ 0x57, 0x81 },
	{ 0x58, 0x75 },
	{ 0x59, 0x01 },	/* AGC dark current comp.: +1 */
	{ 0x5a, 0x2c },
	{ 0x5b, 0x0f },	/* AWB chrominance levels */
	{ 0x5c, 0x10 },
	{ 0x3d, 0x80 },
	{ 0x27, 0xa6 },
	{ 0x12, 0x20 },	/* Toggle AWB */
	{ 0x12, 0x24 },
};
/* Lawrence Glaister <lg@jfm.bc.ca> reports:
 *
 * Register 0x0f in the 7610 has the following effects:
 *
 * 0x85 (AEC method 1): Best overall, good contrast range
 * 0x45 (AEC method 2): Very overexposed
 * 0xa5 (spec sheet default): Ok, but the black level is
 *	shifted resulting in loss of contrast
 * 0x05 (old driver setting): very overexposed, too much
 *	contrast
 */
static const struct ov_i2c_regvals norm_7610[] = {
	{ 0x10, 0xff },
	{ 0x16, 0x06 },
	{ 0x28, 0x24 },
	{ 0x2b, 0xac },
	{ 0x12, 0x00 },
	{ 0x38, 0x81 },
	{ 0x28, 0x24 },	/* 0c */
	{ 0x0f, 0x85 },	/* lg's setting */
	{ 0x15, 0x01 },
	{ 0x20, 0x1c },
	{ 0x23, 0x2a },
	{ 0x24, 0x10 },
	{ 0x25, 0x8a },
	{ 0x26, 0xa2 },
	{ 0x27, 0xc2 },
	{ 0x2a, 0x04 },
	{ 0x2c, 0xfe },
	{ 0x2d, 0x93 },
	{ 0x30, 0x71 },
	{ 0x31, 0x60 },
	{ 0x32, 0x26 },
	{ 0x33, 0x20 },
	{ 0x34, 0x48 },
	{ 0x12, 0x24 },
	{ 0x11, 0x01 },
	{ 0x0c, 0x24 },
	{ 0x0d, 0x24 },
};

static const struct ov_i2c_regvals norm_7620[] = {
	{ 0x12, 0x80 },		/* reset */
	{ 0x00, 0x00 },		/* gain */
	{ 0x01, 0x80 },		/* blue gain */
	{ 0x02, 0x80 },		/* red gain */
	{ 0x03, 0xc0 },		/* OV7670_R03_VREF */
	{ 0x06, 0x60 },
	{ 0x07, 0x00 },
	{ 0x0c, 0x24 },
	{ 0x0c, 0x24 },
	{ 0x0d, 0x24 },
	{ 0x11, 0x01 },
	{ 0x12, 0x24 },
	{ 0x13, 0x01 },
	{ 0x14, 0x84 },
	{ 0x15, 0x01 },
	{ 0x16, 0x03 },
	{ 0x17, 0x2f },
	{ 0x18, 0xcf },
	{ 0x19, 0x06 },
	{ 0x1a, 0xf5 },
	{ 0x1b, 0x00 },
	{ 0x20, 0x18 },
	{ 0x21, 0x80 },
	{ 0x22, 0x80 },
	{ 0x23, 0x00 },
	{ 0x26, 0xa2 },
	{ 0x27, 0xea },
	{ 0x28, 0x22 },	/* Was 0x20, bit1 enables a 2x gain which we need */
	{ 0x29, 0x00 },
	{ 0x2a, 0x10 },
	{ 0x2b, 0x00 },
	{ 0x2c, 0x88 },
	{ 0x2d, 0x91 },
	{ 0x2e, 0x80 },
	{ 0x2f, 0x44 },
	{ 0x60, 0x27 },
	{ 0x61, 0x02 },
	{ 0x62, 0x5f },
	{ 0x63, 0xd5 },
	{ 0x64, 0x57 },
	{ 0x65, 0x83 },
	{ 0x66, 0x55 },
	{ 0x67, 0x92 },
	{ 0x68, 0xcf },
	{ 0x69, 0x76 },
	{ 0x6a, 0x22 },
	{ 0x6b, 0x00 },
	{ 0x6c, 0x02 },
	{ 0x6d, 0x44 },
	{ 0x6e, 0x80 },
	{ 0x6f, 0x1d },
	{ 0x70, 0x8b },
	{ 0x71, 0x00 },
	{ 0x72, 0x14 },
	{ 0x73, 0x54 },
	{ 0x74, 0x00 },
	{ 0x75, 0x8e },
	{ 0x76, 0x00 },
	{ 0x77, 0xff },
	{ 0x78, 0x80 },
	{ 0x79, 0x80 },
	{ 0x7a, 0x80 },
	{ 0x7b, 0xe2 },
	{ 0x7c, 0x00 },
};

/* 7640 and 7648. The defaults should be OK for most registers. */
static const struct ov_i2c_regvals norm_7640[] = {
	{ 0x12, 0x80 },
	{ 0x12, 0x14 },
};

static const struct ov_regvals init_519_ov7660[] = {
	{ 0x5d, 0x03 },	/* Turn off suspend mode */
	{ 0x53, 0x9b },	/* 0x9f enables the (unused) microcontroller */
	{ 0x54, 0x0f },	/* bit2 (jpeg enable) */
	{ 0xa2, 0x20 },	/* a2-a5 are undocumented */
	{ 0xa3, 0x18 },
	{ 0xa4, 0x04 },
	{ 0xa5, 0x28 },
	{ 0x37, 0x00 },	/* SetUsbInit */
	{ 0x55, 0x02 },	/* 4.096 Mhz audio clock */
	/* Enable both fields, YUV Input, disable defect comp (why?) */
	{ 0x20, 0x0c },	/* 0x0d does U <-> V swap */
	{ 0x21, 0x38 },
	{ 0x22, 0x1d },
	{ 0x17, 0x50 },	/* undocumented */
	{ 0x37, 0x00 },	/* undocumented */
	{ 0x40, 0xff },	/* I2C timeout counter */
	{ 0x46, 0x00 },	/* I2C clock prescaler */
};
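/*
 * Illustrative sketch (not part of the original file): tables like
 * init_519_ov7660 above are meant to be replayed in order.
 * write_regvals_sketch() is a hypothetical walker, and reg_w_sketch()
 * a hypothetical bridge register-write helper; the real driver has its
 * own helpers and error handling for this.
 */
static int write_regvals_sketch(struct sd *sd,
				const struct ov_regvals *vals, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = reg_w_sketch(sd, vals[i].reg, vals[i].val);
		if (ret < 0)
			return ret;	/* stop on the first write error */
	}
	return 0;
}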
*/ { 0x20, 0x0c }, /* 0x0d does U <-> V swap */ { 0x21, 0x38 }, { 0x22, 0x1d }, { 0x17, 0x50 }, /* undocumented */ { 0x37, 0x00 }, /* undocumented */ { 0x40, 0xff }, /* I2C timeout counter */ { 0x46, 0x00 }, /* I2C clock prescaler */ }; static const struct ov_i2c_regvals norm_7660[] = { {OV7670_R12_COM7, OV7670_COM7_RESET}, {OV7670_R11_CLKRC, 0x81}, {0x92, 0x00}, /* DM_LNL */ {0x93, 0x00}, /* DM_LNH */ {0x9d, 0x4c}, /* BD50ST */ {0x9e, 0x3f}, /* BD60ST */ {OV7670_R3B_COM11, 0x02}, {OV7670_R13_COM8, 0xf5}, {OV7670_R10_AECH, 0x00}, {OV7670_R00_GAIN, 0x00}, {OV7670_R01_BLUE, 0x7c}, {OV7670_R02_RED, 0x9d}, {OV7670_R12_COM7, 0x00}, {OV7670_R04_COM1, 00}, {OV7670_R18_HSTOP, 0x01}, {OV7670_R17_HSTART, 0x13}, {OV7670_R32_HREF, 0x92}, {OV7670_R19_VSTART, 0x02}, {OV7670_R1A_VSTOP, 0x7a}, {OV7670_R03_VREF, 0x00}, {OV7670_R0E_COM5, 0x04}, {OV7670_R0F_COM6, 0x62}, {OV7670_R15_COM10, 0x00}, {0x16, 0x02}, /* RSVD */ {0x1b, 0x00}, /* PSHFT */ {OV7670_R1E_MVFP, 0x01}, {0x29, 0x3c}, /* RSVD */ {0x33, 0x00}, /* CHLF */ {0x34, 0x07}, /* ARBLM */ {0x35, 0x84}, /* RSVD */ {0x36, 0x00}, /* RSVD */ {0x37, 0x04}, /* ADC */ {0x39, 0x43}, /* OFON */ {OV7670_R3A_TSLB, 0x00}, {OV7670_R3C_COM12, 0x6c}, {OV7670_R3D_COM13, 0x98}, {OV7670_R3F_EDGE, 0x23}, {OV7670_R40_COM15, 0xc1}, {OV7670_R41_COM16, 0x22}, {0x6b, 0x0a}, /* DBLV */ {0xa1, 0x08}, /* RSVD */ {0x69, 0x80}, /* HV */ {0x43, 0xf0}, /* RSVD.. */ {0x44, 0x10}, {0x45, 0x78}, {0x46, 0xa8}, {0x47, 0x60}, {0x48, 0x80}, {0x59, 0xba}, {0x5a, 0x9a}, {0x5b, 0x22}, {0x5c, 0xb9}, {0x5d, 0x9b}, {0x5e, 0x10}, {0x5f, 0xe0}, {0x60, 0x85}, {0x61, 0x60}, {0x9f, 0x9d}, /* RSVD */ {0xa0, 0xa0}, /* DSPC2 */ {0x4f, 0x60}, /* matrix */ {0x50, 0x64}, {0x51, 0x04}, {0x52, 0x18}, {0x53, 0x3c}, {0x54, 0x54}, {0x55, 0x40}, {0x56, 0x40}, {0x57, 0x40}, {0x58, 0x0d}, /* matrix sign */ {0x8b, 0xcc}, /* RSVD */ {0x8c, 0xcc}, {0x8d, 0xcf}, {0x6c, 0x40}, /* gamma curve */ {0x6d, 0xe0}, {0x6e, 0xa0}, {0x6f, 0x80}, {0x70, 0x70}, {0x71, 0x80}, {0x72, 0x60}, {0x73, 0x60}, {0x74, 0x50}, {0x75, 0x40}, {0x76, 0x38}, {0x77, 0x3c}, {0x78, 0x32}, {0x79, 0x1a}, {0x7a, 0x28}, {0x7b, 0x24}, {0x7c, 0x04}, /* gamma curve */ {0x7d, 0x12}, {0x7e, 0x26}, {0x7f, 0x46}, {0x80, 0x54}, {0x81, 0x64}, {0x82, 0x70}, {0x83, 0x7c}, {0x84, 0x86}, {0x85, 0x8e}, {0x86, 0x9c}, {0x87, 0xab}, {0x88, 0xc4}, {0x89, 0xd1}, {0x8a, 0xe5}, {OV7670_R14_COM9, 0x1e}, {OV7670_R24_AEW, 0x80}, {OV7670_R25_AEB, 0x72}, {OV7670_R26_VPT, 0xb3}, {0x62, 0x80}, /* LCC1 */ {0x63, 0x80}, /* LCC2 */ {0x64, 0x06}, /* LCC3 */ {0x65, 0x00}, /* LCC4 */ {0x66, 0x01}, /* LCC5 */ {0x94, 0x0e}, /* RSVD.. */ {0x95, 0x14}, {OV7670_R13_COM8, OV7670_COM8_FASTAEC | OV7670_COM8_AECSTEP | OV7670_COM8_BFILT | 0x10 | OV7670_COM8_AGC | OV7670_COM8_AWB | OV7670_COM8_AEC}, {0xa1, 0xc8} }; static const struct ov_i2c_regvals norm_9600[] = { {0x12, 0x80}, {0x0c, 0x28}, {0x11, 0x80}, {0x13, 0xb5}, {0x14, 0x3e}, {0x1b, 0x04}, {0x24, 0xb0}, {0x25, 0x90}, {0x26, 0x94}, {0x35, 0x90}, {0x37, 0x07}, {0x38, 0x08}, {0x01, 0x8e}, {0x02, 0x85} }; /* 7670. Defaults taken from OmniVision provided data, * as provided by Jonathan Corbet of OLPC */ static const struct ov_i2c_regvals norm_7670[] = { { OV7670_R12_COM7, OV7670_COM7_RESET }, { OV7670_R3A_TSLB, 0x04 }, /* OV */ { OV7670_R12_COM7, OV7670_COM7_FMT_VGA }, /* VGA */ { OV7670_R11_CLKRC, 0x01 }, /* * Set the hardware window. These values from OV don't entirely * make sense - hstop is less than hstart. But they work... 
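* A plausible reading (an assumption, not from OV documentation): the OV7670
* pixel array is 784 columns wide and its horizontal counter wraps, so with
* hstart = (0x13 << 3) | (HREF & 7) = 158 the 640-pixel window ends at
* (158 + 640) % 784 = 14, whose high bits are exactly the 0x01 written to HSTOP.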
*/ { OV7670_R17_HSTART, 0x13 }, { OV7670_R18_HSTOP, 0x01 }, { OV7670_R32_HREF, 0xb6 }, { OV7670_R19_VSTART, 0x02 }, { OV7670_R1A_VSTOP, 0x7a }, { OV7670_R03_VREF, 0x0a }, { OV7670_R0C_COM3, 0x00 }, { OV7670_R3E_COM14, 0x00 }, /* Mystery scaling numbers */ { 0x70, 0x3a }, { 0x71, 0x35 }, { 0x72, 0x11 }, { 0x73, 0xf0 }, { 0xa2, 0x02 }, /* { OV7670_R15_COM10, 0x0 }, */ /* Gamma curve values */ { 0x7a, 0x20 }, { 0x7b, 0x10 }, { 0x7c, 0x1e }, { 0x7d, 0x35 }, { 0x7e, 0x5a }, { 0x7f, 0x69 }, { 0x80, 0x76 }, { 0x81, 0x80 }, { 0x82, 0x88 }, { 0x83, 0x8f }, { 0x84, 0x96 }, { 0x85, 0xa3 }, { 0x86, 0xaf }, { 0x87, 0xc4 }, { 0x88, 0xd7 }, { 0x89, 0xe8 }, /* AGC and AEC parameters. Note we start by disabling those features, then turn them only after tweaking the values. */ { OV7670_R13_COM8, OV7670_COM8_FASTAEC | OV7670_COM8_AECSTEP | OV7670_COM8_BFILT }, { OV7670_R00_GAIN, 0x00 }, { OV7670_R10_AECH, 0x00 }, { OV7670_R0D_COM4, 0x40 }, /* magic reserved bit */ { OV7670_R14_COM9, 0x18 }, /* 4x gain + magic rsvd bit */ { OV7670_RA5_BD50MAX, 0x05 }, { OV7670_RAB_BD60MAX, 0x07 }, { OV7670_R24_AEW, 0x95 }, { OV7670_R25_AEB, 0x33 }, { OV7670_R26_VPT, 0xe3 }, { OV7670_R9F_HAECC1, 0x78 }, { OV7670_RA0_HAECC2, 0x68 }, { 0xa1, 0x03 }, /* magic */ { OV7670_RA6_HAECC3, 0xd8 }, { OV7670_RA7_HAECC4, 0xd8 }, { OV7670_RA8_HAECC5, 0xf0 }, { OV7670_RA9_HAECC6, 0x90 }, { OV7670_RAA_HAECC7, 0x94 }, { OV7670_R13_COM8, OV7670_COM8_FASTAEC | OV7670_COM8_AECSTEP | OV7670_COM8_BFILT | OV7670_COM8_AGC | OV7670_COM8_AEC }, /* Almost all of these are magic "reserved" values. */ { OV7670_R0E_COM5, 0x61 }, { OV7670_R0F_COM6, 0x4b }, { 0x16, 0x02 }, { OV7670_R1E_MVFP, 0x07 }, { 0x21, 0x02 }, { 0x22, 0x91 }, { 0x29, 0x07 }, { 0x33, 0x0b }, { 0x35, 0x0b }, { 0x37, 0x1d }, { 0x38, 0x71 }, { 0x39, 0x2a }, { OV7670_R3C_COM12, 0x78 }, { 0x4d, 0x40 }, { 0x4e, 0x20 }, { OV7670_R69_GFIX, 0x00 }, { 0x6b, 0x4a }, { 0x74, 0x10 }, { 0x8d, 0x4f }, { 0x8e, 0x00 }, { 0x8f, 0x00 }, { 0x90, 0x00 }, { 0x91, 0x00 }, { 0x96, 0x00 }, { 0x9a, 0x00 }, { 0xb0, 0x84 }, { 0xb1, 0x0c }, { 0xb2, 0x0e }, { 0xb3, 0x82 }, { 0xb8, 0x0a }, /* More reserved magic, some of which tweaks white balance */ { 0x43, 0x0a }, { 0x44, 0xf0 }, { 0x45, 0x34 }, { 0x46, 0x58 }, { 0x47, 0x28 }, { 0x48, 0x3a }, { 0x59, 0x88 }, { 0x5a, 0x88 }, { 0x5b, 0x44 }, { 0x5c, 0x67 }, { 0x5d, 0x49 }, { 0x5e, 0x0e }, { 0x6c, 0x0a }, { 0x6d, 0x55 }, { 0x6e, 0x11 }, { 0x6f, 0x9f }, /* "9e for advance AWB" */ { 0x6a, 0x40 }, { OV7670_R01_BLUE, 0x40 }, { OV7670_R02_RED, 0x60 }, { OV7670_R13_COM8, OV7670_COM8_FASTAEC | OV7670_COM8_AECSTEP | OV7670_COM8_BFILT | OV7670_COM8_AGC | OV7670_COM8_AEC | OV7670_COM8_AWB }, /* Matrix coefficients */ { 0x4f, 0x80 }, { 0x50, 0x80 }, { 0x51, 0x00 }, { 0x52, 0x22 }, { 0x53, 0x5e }, { 0x54, 0x80 }, { 0x58, 0x9e }, { OV7670_R41_COM16, OV7670_COM16_AWBGAIN }, { OV7670_R3F_EDGE, 0x00 }, { 0x75, 0x05 }, { 0x76, 0xe1 }, { 0x4c, 0x00 }, { 0x77, 0x01 }, { OV7670_R3D_COM13, OV7670_COM13_GAMMA | OV7670_COM13_UVSAT | 2}, /* was 3 */ { 0x4b, 0x09 }, { 0xc9, 0x60 }, { OV7670_R41_COM16, 0x38 }, { 0x56, 0x40 }, { 0x34, 0x11 }, { OV7670_R3B_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO }, { 0xa4, 0x88 }, { 0x96, 0x00 }, { 0x97, 0x30 }, { 0x98, 0x20 }, { 0x99, 0x30 }, { 0x9a, 0x84 }, { 0x9b, 0x29 }, { 0x9c, 0x03 }, { 0x9d, 0x4c }, { 0x9e, 0x3f }, { 0x78, 0x04 }, /* Extra-weird stuff. 
Some sort of multiplexor register */ { 0x79, 0x01 }, { 0xc8, 0xf0 }, { 0x79, 0x0f }, { 0xc8, 0x00 }, { 0x79, 0x10 }, { 0xc8, 0x7e }, { 0x79, 0x0a }, { 0xc8, 0x80 }, { 0x79, 0x0b }, { 0xc8, 0x01 }, { 0x79, 0x0c }, { 0xc8, 0x0f }, { 0x79, 0x0d }, { 0xc8, 0x20 }, { 0x79, 0x09 }, { 0xc8, 0x80 }, { 0x79, 0x02 }, { 0xc8, 0xc0 }, { 0x79, 0x03 }, { 0xc8, 0x40 }, { 0x79, 0x05 }, { 0xc8, 0x30 }, { 0x79, 0x26 }, }; static const struct ov_i2c_regvals norm_8610[] = { { 0x12, 0x80 }, { 0x00, 0x00 }, { 0x01, 0x80 }, { 0x02, 0x80 }, { 0x03, 0xc0 }, { 0x04, 0x30 }, { 0x05, 0x30 }, /* was 0x10, new from windrv 090403 */ { 0x06, 0x70 }, /* was 0x80, new from windrv 090403 */ { 0x0a, 0x86 }, { 0x0b, 0xb0 }, { 0x0c, 0x20 }, { 0x0d, 0x20 }, { 0x11, 0x01 }, { 0x12, 0x25 }, { 0x13, 0x01 }, { 0x14, 0x04 }, { 0x15, 0x01 }, /* Lin and Win think different about UV order */ { 0x16, 0x03 }, { 0x17, 0x38 }, /* was 0x2f, new from windrv 090403 */ { 0x18, 0xea }, /* was 0xcf, new from windrv 090403 */ { 0x19, 0x02 }, /* was 0x06, new from windrv 090403 */ { 0x1a, 0xf5 }, { 0x1b, 0x00 }, { 0x20, 0xd0 }, /* was 0x90, new from windrv 090403 */ { 0x23, 0xc0 }, /* was 0x00, new from windrv 090403 */ { 0x24, 0x30 }, /* was 0x1d, new from windrv 090403 */ { 0x25, 0x50 }, /* was 0x57, new from windrv 090403 */ { 0x26, 0xa2 }, { 0x27, 0xea }, { 0x28, 0x00 }, { 0x29, 0x00 }, { 0x2a, 0x80 }, { 0x2b, 0xc8 }, /* was 0xcc, new from windrv 090403 */ { 0x2c, 0xac }, { 0x2d, 0x45 }, /* was 0xd5, new from windrv 090403 */ { 0x2e, 0x80 }, { 0x2f, 0x14 }, /* was 0x01, new from windrv 090403 */ { 0x4c, 0x00 }, { 0x4d, 0x30 }, /* was 0x10, new from windrv 090403 */ { 0x60, 0x02 }, /* was 0x01, new from windrv 090403 */ { 0x61, 0x00 }, /* was 0x09, new from windrv 090403 */ { 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */ { 0x63, 0xff }, { 0x64, 0x53 }, /* new windrv 090403 says 0x57, * maybe that's wrong */ { 0x65, 0x00 }, { 0x66, 0x55 }, { 0x67, 0xb0 }, { 0x68, 0xc0 }, /* was 0xaf, new from windrv 090403 */ { 0x69, 0x02 }, { 0x6a, 0x22 }, { 0x6b, 0x00 }, { 0x6c, 0x99 }, /* was 0x80, old windrv says 0x00, but * deleting bit7 colors the first images red */ { 0x6d, 0x11 }, /* was 0x00, new from windrv 090403 */ { 0x6e, 0x11 }, /* was 0x00, new from windrv 090403 */ { 0x6f, 0x01 }, { 0x70, 0x8b }, { 0x71, 0x00 }, { 0x72, 0x14 }, { 0x73, 0x54 }, { 0x74, 0x00 },/* 0x60? 
- was 0x00, new from windrv 090403 */ { 0x75, 0x0e }, { 0x76, 0x02 }, /* was 0x02, new from windrv 090403 */ { 0x77, 0xff }, { 0x78, 0x80 }, { 0x79, 0x80 }, { 0x7a, 0x80 }, { 0x7b, 0x10 }, /* was 0x13, new from windrv 090403 */ { 0x7c, 0x00 }, { 0x7d, 0x08 }, /* was 0x09, new from windrv 090403 */ { 0x7e, 0x08 }, /* was 0xc0, new from windrv 090403 */ { 0x7f, 0xfb }, { 0x80, 0x28 }, { 0x81, 0x00 }, { 0x82, 0x23 }, { 0x83, 0x0b }, { 0x84, 0x00 }, { 0x85, 0x62 }, /* was 0x61, new from windrv 090403 */ { 0x86, 0xc9 }, { 0x87, 0x00 }, { 0x88, 0x00 }, { 0x89, 0x01 }, { 0x12, 0x20 }, { 0x12, 0x25 }, /* was 0x24, new from windrv 090403 */ }; static unsigned char ov7670_abs_to_sm(unsigned char v) { if (v > 127) return v & 0x7f; return (128 - v) | 0x80; } /* Write an OV519 register */ static void reg_w(struct sd *sd, u16 index, u16 value) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret, req = 0; if (sd->gspca_dev.usb_err < 0) return; /* Avoid things going too fast for the bridge with an xhci host */ udelay(150); switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: req = 2; break; case BRIDGE_OVFX2: req = 0x0a; fallthrough; case BRIDGE_W9968CF: gspca_dbg(gspca_dev, D_USBO, "SET %02x %04x %04x\n", req, value, index); ret = usb_control_msg(sd->gspca_dev.dev, usb_sndctrlpipe(sd->gspca_dev.dev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); goto leave; default: req = 1; } gspca_dbg(gspca_dev, D_USBO, "SET %02x 0000 %04x %02x\n", req, index, value); sd->gspca_dev.usb_buf[0] = value; ret = usb_control_msg(sd->gspca_dev.dev, usb_sndctrlpipe(sd->gspca_dev.dev, 0), req, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, sd->gspca_dev.usb_buf, 1, 500); leave: if (ret < 0) { gspca_err(gspca_dev, "reg_w %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; return; } } /* Read from an OV519 register, note not valid for the w9968cf!! */ /* returns: negative is error, pos or zero is data */ static int reg_r(struct sd *sd, u16 index) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret; int req; if (sd->gspca_dev.usb_err < 0) return -1; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: req = 3; break; case BRIDGE_OVFX2: req = 0x0b; break; default: req = 1; } /* Avoid things going too fast for the bridge with an xhci host */ udelay(150); ret = usb_control_msg(sd->gspca_dev.dev, usb_rcvctrlpipe(sd->gspca_dev.dev, 0), req, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, sd->gspca_dev.usb_buf, 1, 500); if (ret >= 0) { ret = sd->gspca_dev.usb_buf[0]; gspca_dbg(gspca_dev, D_USBI, "GET %02x 0000 %04x %02x\n", req, index, ret); } else { gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; /* * Make sure the result is zeroed to avoid uninitialized * values. */ gspca_dev->usb_buf[0] = 0; } return ret; } /* Read 8 values from an OV519 register */ static int reg_r8(struct sd *sd, u16 index) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret; if (sd->gspca_dev.usb_err < 0) return -1; /* Avoid things going too fast for the bridge with an xhci host */ udelay(150); ret = usb_control_msg(sd->gspca_dev.dev, usb_rcvctrlpipe(sd->gspca_dev.dev, 0), 1, /* REQ_IO */ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, sd->gspca_dev.usb_buf, 8, 500); if (ret >= 0) { ret = sd->gspca_dev.usb_buf[0]; } else { gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; /* * Make sure the buffer is zeroed to avoid uninitialized * values.
*/ memset(gspca_dev->usb_buf, 0, 8); } return ret; } /* * Writes bits at positions specified by mask to an OV51x reg. Bits that are in * the same position as 1's in "mask" are cleared and set to "value". Bits * that are in the same position as 0's in "mask" are preserved, regardless * of their respective state in "value". */ static void reg_w_mask(struct sd *sd, u16 index, u8 value, u8 mask) { int ret; u8 oldval; if (mask != 0xff) { value &= mask; /* Enforce mask on value */ ret = reg_r(sd, index); if (ret < 0) return; oldval = ret & ~mask; /* Clear the masked bits */ value |= oldval; /* Set the desired bits */ } reg_w(sd, index, value); } /* * Writes an n-byte value to a single register. Only valid with certain * registers (0x30 and 0xc4 - 0xce). */ static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret; if (sd->gspca_dev.usb_err < 0) return; *((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value); /* Avoid things going too fast for the bridge with an xhci host */ udelay(150); ret = usb_control_msg(sd->gspca_dev.dev, usb_sndctrlpipe(sd->gspca_dev.dev, 0), 1 /* REQ_IO */, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, sd->gspca_dev.usb_buf, n, 500); if (ret < 0) { gspca_err(gspca_dev, "reg_w32 %02x failed %d\n", index, ret); sd->gspca_dev.usb_err = ret; } } static void ov511_i2c_w(struct sd *sd, u8 reg, u8 value) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc, retries; gspca_dbg(gspca_dev, D_USBO, "ov511_i2c_w %02x %02x\n", reg, value); /* Three byte write cycle */ for (retries = 6; ; ) { /* Select camera register */ reg_w(sd, R51x_I2C_SADDR_3, reg); /* Write "value" to I2C data port of OV511 */ reg_w(sd, R51x_I2C_DATA, value); /* Initiate 3-byte write cycle */ reg_w(sd, R511_I2C_CTL, 0x01); do { rc = reg_r(sd, R511_I2C_CTL); } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ if (rc < 0) return; if ((rc & 2) == 0) /* Ack? */ break; if (--retries < 0) { gspca_dbg(gspca_dev, D_USBO, "i2c write retries exhausted\n"); return; } } } static int ov511_i2c_r(struct sd *sd, u8 reg) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc, value, retries; /* Two byte write cycle */ for (retries = 6; ; ) { /* Select camera register */ reg_w(sd, R51x_I2C_SADDR_2, reg); /* Initiate 2-byte write cycle */ reg_w(sd, R511_I2C_CTL, 0x03); do { rc = reg_r(sd, R511_I2C_CTL); } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ if (rc < 0) return rc; if ((rc & 2) == 0) /* Ack? */ break; /* I2C abort */ reg_w(sd, R511_I2C_CTL, 0x10); if (--retries < 0) { gspca_dbg(gspca_dev, D_USBI, "i2c write retries exhausted\n"); return -1; } } /* Two byte read cycle */ for (retries = 6; ; ) { /* Initiate 2-byte read cycle */ reg_w(sd, R511_I2C_CTL, 0x05); do { rc = reg_r(sd, R511_I2C_CTL); } while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */ if (rc < 0) return rc; if ((rc & 2) == 0) /* Ack? */ break; /* I2C abort */ reg_w(sd, R511_I2C_CTL, 0x10); if (--retries < 0) { gspca_dbg(gspca_dev, D_USBI, "i2c read retries exhausted\n"); return -1; } } value = reg_r(sd, R51x_I2C_DATA); gspca_dbg(gspca_dev, D_USBI, "ov511_i2c_r %02x %02x\n", reg, value); /* This is needed to make i2c_w() work */ reg_w(sd, R511_I2C_CTL, 0x05); return value; } /* * The OV518 I2C I/O procedure is different, hence this function. * This is normally only called from i2c_w(). Note that this function * always succeeds regardless of whether the sensor is present and working.
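* (Unlike ov511_i2c_w() above there is no ack bit to poll here; the code just
* sleeps 4 ms and reads R518_I2C_CTL back, so a missing or broken sensor
* cannot be detected at this level.)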
*/ static void ov518_i2c_w(struct sd *sd, u8 reg, u8 value) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; gspca_dbg(gspca_dev, D_USBO, "ov518_i2c_w %02x %02x\n", reg, value); /* Select camera register */ reg_w(sd, R51x_I2C_SADDR_3, reg); /* Write "value" to I2C data port of the OV518 */ reg_w(sd, R51x_I2C_DATA, value); /* Initiate 3-byte write cycle */ reg_w(sd, R518_I2C_CTL, 0x01); /* wait for write complete */ msleep(4); reg_r8(sd, R518_I2C_CTL); } /* * returns: negative is error, pos or zero is data * * The OV518 I2C I/O procedure is different, hence this function. * This is normally only called from i2c_r(). Note that this function * always succeeds regardless of whether the sensor is present and working. */ static int ov518_i2c_r(struct sd *sd, u8 reg) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int value; /* Select camera register */ reg_w(sd, R51x_I2C_SADDR_2, reg); /* Initiate 2-byte write cycle */ reg_w(sd, R518_I2C_CTL, 0x03); reg_r8(sd, R518_I2C_CTL); /* Initiate 2-byte read cycle */ reg_w(sd, R518_I2C_CTL, 0x05); reg_r8(sd, R518_I2C_CTL); value = reg_r(sd, R51x_I2C_DATA); gspca_dbg(gspca_dev, D_USBI, "ov518_i2c_r %02x %02x\n", reg, value); return value; } static void ovfx2_i2c_w(struct sd *sd, u8 reg, u8 value) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret; if (sd->gspca_dev.usb_err < 0) return; ret = usb_control_msg(sd->gspca_dev.dev, usb_sndctrlpipe(sd->gspca_dev.dev, 0), 0x02, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, (u16) value, (u16) reg, NULL, 0, 500); if (ret < 0) { gspca_err(gspca_dev, "ovfx2_i2c_w %02x failed %d\n", reg, ret); sd->gspca_dev.usb_err = ret; } gspca_dbg(gspca_dev, D_USBO, "ovfx2_i2c_w %02x %02x\n", reg, value); } static int ovfx2_i2c_r(struct sd *sd, u8 reg) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int ret; if (sd->gspca_dev.usb_err < 0) return -1; ret = usb_control_msg(sd->gspca_dev.dev, usb_rcvctrlpipe(sd->gspca_dev.dev, 0), 0x03, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, (u16) reg, sd->gspca_dev.usb_buf, 1, 500); if (ret >= 0) { ret = sd->gspca_dev.usb_buf[0]; gspca_dbg(gspca_dev, D_USBI, "ovfx2_i2c_r %02x %02x\n", reg, ret); } else { gspca_err(gspca_dev, "ovfx2_i2c_r %02x failed %d\n", reg, ret); sd->gspca_dev.usb_err = ret; } return ret; } static void i2c_w(struct sd *sd, u8 reg, u8 value) { if (sd->sensor_reg_cache[reg] == value) return; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: ov511_i2c_w(sd, reg, value); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: case BRIDGE_OV519: ov518_i2c_w(sd, reg, value); break; case BRIDGE_OVFX2: ovfx2_i2c_w(sd, reg, value); break; case BRIDGE_W9968CF: w9968cf_i2c_w(sd, reg, value); break; } if (sd->gspca_dev.usb_err >= 0) { /* Upon sensor reset, empty the register cache */ if (reg == 0x12 && (value & 0x80)) memset(sd->sensor_reg_cache, -1, sizeof(sd->sensor_reg_cache)); else sd->sensor_reg_cache[reg] = value; } } static int i2c_r(struct sd *sd, u8 reg) { int ret = -1; if (sd->sensor_reg_cache[reg] != -1) return sd->sensor_reg_cache[reg]; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: ret = ov511_i2c_r(sd, reg); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: case BRIDGE_OV519: ret = ov518_i2c_r(sd, reg); break; case BRIDGE_OVFX2: ret = ovfx2_i2c_r(sd, reg); break; case BRIDGE_W9968CF: ret = w9968cf_i2c_r(sd, reg); break; } if (ret >= 0) sd->sensor_reg_cache[reg] = ret; return ret; } /* Writes bits at positions specified by mask to an I2C reg.
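* For example (an illustrative call, mirroring the idiom used throughout this
* driver): i2c_w_mask(sd, 0x12, 0x04, 0x06) sets bit 2 and clears bit 1 of
* sensor register 0x12 while leaving the other six bits untouched.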
Bits that are in * the same position as 1's in "mask" are cleared and set to "value". Bits * that are in the same position as 0's in "mask" are preserved, regardless * of their respective state in "value". */ static void i2c_w_mask(struct sd *sd, u8 reg, u8 value, u8 mask) { int rc; u8 oldval; value &= mask; /* Enforce mask on value */ rc = i2c_r(sd, reg); if (rc < 0) return; oldval = rc & ~mask; /* Clear the masked bits */ value |= oldval; /* Set the desired bits */ i2c_w(sd, reg, value); } /* Temporarily stops OV511 from functioning. Must do this before changing * registers while the camera is streaming */ static inline void ov51x_stop(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; gspca_dbg(gspca_dev, D_STREAM, "stopping\n"); sd->stopped = 1; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: reg_w(sd, R51x_SYS_RESET, 0x3d); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: reg_w_mask(sd, R51x_SYS_RESET, 0x3a, 0x3a); break; case BRIDGE_OV519: reg_w(sd, OV519_R51_RESET1, 0x0f); reg_w(sd, OV519_R51_RESET1, 0x00); reg_w(sd, 0x22, 0x00); /* FRAR */ break; case BRIDGE_OVFX2: reg_w_mask(sd, 0x0f, 0x00, 0x02); break; case BRIDGE_W9968CF: reg_w(sd, 0x3c, 0x0a05); /* stop USB transfer */ break; } } /* Restarts OV511 after ov51x_stop() is called. Has no effect if it is not * actually stopped (for performance). */ static inline void ov51x_restart(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; gspca_dbg(gspca_dev, D_STREAM, "restarting\n"); if (!sd->stopped) return; sd->stopped = 0; /* Reinitialize the stream */ switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: reg_w(sd, R51x_SYS_RESET, 0x00); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: reg_w(sd, 0x2f, 0x80); reg_w(sd, R51x_SYS_RESET, 0x00); break; case BRIDGE_OV519: reg_w(sd, OV519_R51_RESET1, 0x0f); reg_w(sd, OV519_R51_RESET1, 0x00); reg_w(sd, 0x22, 0x1d); /* FRAR */ break; case BRIDGE_OVFX2: reg_w_mask(sd, 0x0f, 0x02, 0x02); break; case BRIDGE_W9968CF: reg_w(sd, 0x3c, 0x8a05); /* USB FIFO enable */ break; } } static void ov51x_set_slave_ids(struct sd *sd, u8 slave); /* This does an initial reset of an OmniVision sensor and ensures that I2C * is synchronized. Returns <0 on failure. */ static int init_ov_sensor(struct sd *sd, u8 slave) { int i; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; ov51x_set_slave_ids(sd, slave); /* Reset the sensor */ i2c_w(sd, 0x12, 0x80); /* Wait for it to initialize */ msleep(150); for (i = 0; i < i2c_detect_tries; i++) { if (i2c_r(sd, OV7610_REG_ID_HIGH) == 0x7f && i2c_r(sd, OV7610_REG_ID_LOW) == 0xa2) { gspca_dbg(gspca_dev, D_PROBE, "I2C synced in %d attempt(s)\n", i); return 0; } /* Reset the sensor */ i2c_w(sd, 0x12, 0x80); /* Wait for it to initialize */ msleep(150); /* Dummy read to sync I2C */ if (i2c_r(sd, 0x00) < 0) return -1; } return -1; } /* Set the read and write slave IDs. The "slave" argument is the write slave, * and the read slave will be set to (slave + 1). * This should not be called from outside the i2c I/O functions.
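* (With the usual OmniVision convention of a 0x42 write SID for the OV7xx0
* series, an assumption since the define lives earlier in this file, reads
* would then be addressed to slave 0x43.)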
*/ static void ov51x_set_slave_ids(struct sd *sd, u8 slave) { switch (sd->bridge) { case BRIDGE_OVFX2: reg_w(sd, OVFX2_I2C_ADDR, slave); return; case BRIDGE_W9968CF: sd->sensor_addr = slave; return; } reg_w(sd, R51x_I2C_W_SID, slave); reg_w(sd, R51x_I2C_R_SID, slave + 1); } static void write_regvals(struct sd *sd, const struct ov_regvals *regvals, int n) { while (--n >= 0) { reg_w(sd, regvals->reg, regvals->val); regvals++; } } static void write_i2c_regvals(struct sd *sd, const struct ov_i2c_regvals *regvals, int n) { while (--n >= 0) { i2c_w(sd, regvals->reg, regvals->val); regvals++; } } /**************************************************************************** * * OV511 and sensor configuration * ***************************************************************************/ /* This initializes the OV2x10 / OV3610 / OV3620 / OV9600 */ static void ov_hires_configure(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int high, low; if (sd->bridge != BRIDGE_OVFX2) { gspca_err(gspca_dev, "Error: hires sensors are only supported with ovfx2\n"); return; } gspca_dbg(gspca_dev, D_PROBE, "starting ov hires configuration\n"); /* Detect sensor (sub)type */ high = i2c_r(sd, 0x0a); low = i2c_r(sd, 0x0b); /* info("%x, %x", high, low); */ switch (high) { case 0x96: switch (low) { case 0x40: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV2610\n"); sd->sensor = SEN_OV2610; return; case 0x41: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV2610AE\n"); sd->sensor = SEN_OV2610AE; return; case 0xb1: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV9600\n"); sd->sensor = SEN_OV9600; return; } break; case 0x36: if ((low & 0x0f) == 0x00) { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV3610\n"); sd->sensor = SEN_OV3610; return; } break; } gspca_err(gspca_dev, "Error: unknown sensor type: %02x%02x\n", high, low); } /* This initializes the OV8110 and OV8610 sensors. The OV8110 uses * the same register settings as the OV8610, since they are very similar. */ static void ov8xx0_configure(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc; gspca_dbg(gspca_dev, D_PROBE, "starting ov8xx0 configuration\n"); /* Detect sensor (sub)type */ rc = i2c_r(sd, OV7610_REG_COM_I); if (rc < 0) { gspca_err(gspca_dev, "Error detecting sensor type\n"); return; } if ((rc & 3) == 1) sd->sensor = SEN_OV8610; else gspca_err(gspca_dev, "Unknown image sensor version: %d\n", rc & 3); } /* This initializes the OV7610, OV7620, or OV76BE sensor. The OV76BE uses * the same register settings as the OV7610, since they are very similar. */ static void ov7xx0_configure(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc, high, low; gspca_dbg(gspca_dev, D_PROBE, "starting OV7xx0 configuration\n"); /* Detect sensor (sub)type */ rc = i2c_r(sd, OV7610_REG_COM_I); /* add OV7670 here * it appears to be wrongly detected as a 7610 by default */ if (rc < 0) { gspca_err(gspca_dev, "Error detecting sensor type\n"); return; } if ((rc & 3) == 3) { /* quick hack to make OV7670s work */ high = i2c_r(sd, 0x0a); low = i2c_r(sd, 0x0b); /* info("%x, %x", high, low); */ if (high == 0x76 && (low & 0xf0) == 0x70) { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV76%02x\n", low); sd->sensor = SEN_OV7670; } else { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7610\n"); sd->sensor = SEN_OV7610; } } else if ((rc & 3) == 1) { /* I don't know what's different about the 76BE yet.
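* The code below tells it apart from the OV7620AE only by bit 0 of register
* 0x15.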
*/ if (i2c_r(sd, 0x15) & 1) { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7620AE\n"); sd->sensor = SEN_OV7620AE; } else { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV76BE\n"); sd->sensor = SEN_OV76BE; } } else if ((rc & 3) == 0) { /* try to read product id registers */ high = i2c_r(sd, 0x0a); if (high < 0) { gspca_err(gspca_dev, "Error detecting camera chip PID\n"); return; } low = i2c_r(sd, 0x0b); if (low < 0) { gspca_err(gspca_dev, "Error detecting camera chip VER\n"); return; } if (high == 0x76) { switch (low) { case 0x30: gspca_err(gspca_dev, "Sensor is an OV7630/OV7635\n"); gspca_err(gspca_dev, "7630 is not supported by this driver\n"); return; case 0x40: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7645\n"); sd->sensor = SEN_OV7640; /* FIXME */ break; case 0x45: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7645B\n"); sd->sensor = SEN_OV7640; /* FIXME */ break; case 0x48: gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7648\n"); sd->sensor = SEN_OV7648; break; case 0x60: gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV7660\n"); sd->sensor = SEN_OV7660; break; default: gspca_err(gspca_dev, "Unknown sensor: 0x76%02x\n", low); return; } } else { gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7620\n"); sd->sensor = SEN_OV7620; } } else { gspca_err(gspca_dev, "Unknown image sensor version: %d\n", rc & 3); } } /* This initializes the OV6620, OV6630, OV6630AE, or OV6630AF sensor. */ static void ov6xx0_configure(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int rc; gspca_dbg(gspca_dev, D_PROBE, "starting OV6xx0 configuration\n"); /* Detect sensor (sub)type */ rc = i2c_r(sd, OV7610_REG_COM_I); if (rc < 0) { gspca_err(gspca_dev, "Error detecting sensor type\n"); return; } /* Ugh. The first two bits are the version bits, but * the entire register value must be used. I guess OVT * underestimated how many variants they would make. */ switch (rc) { case 0x00: sd->sensor = SEN_OV6630; pr_warn("WARNING: Sensor is an OV66308. Your camera may have been misdetected in previous driver versions.\n"); break; case 0x01: sd->sensor = SEN_OV6620; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV6620\n"); break; case 0x02: sd->sensor = SEN_OV6630; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AE\n"); break; case 0x03: sd->sensor = SEN_OV66308AF; gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AF\n"); break; case 0x90: sd->sensor = SEN_OV6630; pr_warn("WARNING: Sensor is an OV66307. Your camera may have been misdetected in previous driver versions.\n"); break; default: gspca_err(gspca_dev, "FATAL: Unknown sensor version: 0x%02x\n", rc); return; } /* Set sensor-specific vars */ sd->sif = 1; } /* Turns on or off the LED. 
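* (sd->invert_led, set from the USB ID table at probe time, inverts the
* requested state before the bridge-specific write below.)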
Only has an effect with OV511+/OV518(+)/OV519 */ static void ov51x_led_control(struct sd *sd, int on) { if (sd->invert_led) on = !on; switch (sd->bridge) { /* OV511 has no LED control */ case BRIDGE_OV511PLUS: reg_w(sd, R511_SYS_LED_CTL, on); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: reg_w_mask(sd, R518_GPIO_OUT, 0x02 * on, 0x02); break; case BRIDGE_OV519: reg_w_mask(sd, OV519_GPIO_DATA_OUT0, on, 1); break; } } static void sd_reset_snapshot(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!sd->snapshot_needs_reset) return; /* Note it is important that we clear sd->snapshot_needs_reset, before actually clearing the snapshot state in the bridge otherwise we might race with the pkt_scan interrupt handler */ sd->snapshot_needs_reset = 0; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: reg_w(sd, R51x_SYS_SNAP, 0x02); reg_w(sd, R51x_SYS_SNAP, 0x00); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: reg_w(sd, R51x_SYS_SNAP, 0x02); /* Reset */ reg_w(sd, R51x_SYS_SNAP, 0x01); /* Enable */ break; case BRIDGE_OV519: reg_w(sd, R51x_SYS_RESET, 0x40); reg_w(sd, R51x_SYS_RESET, 0x00); break; } } static void ov51x_upload_quan_tables(struct sd *sd) { static const unsigned char yQuanTable511[] = { 0, 1, 1, 2, 2, 3, 3, 4, 1, 1, 1, 2, 2, 3, 4, 4, 1, 1, 2, 2, 3, 4, 4, 4, 2, 2, 2, 3, 4, 4, 4, 4, 2, 2, 3, 4, 4, 5, 5, 5, 3, 3, 4, 4, 5, 5, 5, 5, 3, 4, 4, 4, 5, 5, 5, 5, 4, 4, 4, 4, 5, 5, 5, 5 }; static const unsigned char uvQuanTable511[] = { 0, 2, 2, 3, 4, 4, 4, 4, 2, 2, 2, 4, 4, 4, 4, 4, 2, 2, 3, 4, 4, 4, 4, 4, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }; /* OV518 quantization tables are 8x4 (instead of 8x8) */ static const unsigned char yQuanTable518[] = { 5, 4, 5, 6, 6, 7, 7, 7, 5, 5, 5, 5, 6, 7, 7, 7, 6, 6, 6, 6, 7, 7, 7, 8, 7, 7, 6, 7, 7, 7, 8, 8 }; static const unsigned char uvQuanTable518[] = { 6, 6, 6, 7, 7, 7, 7, 7, 6, 6, 6, 7, 7, 7, 7, 7, 6, 6, 6, 7, 7, 7, 7, 8, 7, 7, 7, 7, 7, 7, 8, 8 }; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; const unsigned char *pYTable, *pUVTable; unsigned char val0, val1; int i, size, reg = R51x_COMP_LUT_BEGIN; gspca_dbg(gspca_dev, D_PROBE, "Uploading quantization tables\n"); if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) { pYTable = yQuanTable511; pUVTable = uvQuanTable511; size = 32; } else { pYTable = yQuanTable518; pUVTable = uvQuanTable518; size = 16; } for (i = 0; i < size; i++) { val0 = *pYTable++; val1 = *pYTable++; val0 &= 0x0f; val1 &= 0x0f; val0 |= val1 << 4; reg_w(sd, reg, val0); val0 = *pUVTable++; val1 = *pUVTable++; val0 &= 0x0f; val1 &= 0x0f; val0 |= val1 << 4; reg_w(sd, reg + size, val0); reg++; } } /* This initializes the OV511/OV511+ and the sensor */ static void ov511_configure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* For 511 and 511+ */ static const struct ov_regvals init_511[] = { { R51x_SYS_RESET, 0x7f }, { R51x_SYS_INIT, 0x01 }, { R51x_SYS_RESET, 0x7f }, { R51x_SYS_INIT, 0x01 }, { R51x_SYS_RESET, 0x3f }, { R51x_SYS_INIT, 0x01 }, { R51x_SYS_RESET, 0x3d }, }; static const struct ov_regvals norm_511[] = { { R511_DRAM_FLOW_CTL, 0x01 }, { R51x_SYS_SNAP, 0x00 }, { R51x_SYS_SNAP, 0x02 }, { R51x_SYS_SNAP, 0x00 }, { R511_FIFO_OPTS, 0x1f }, { R511_COMP_EN, 0x00 }, { R511_COMP_LUT_EN, 0x03 }, }; static const struct ov_regvals norm_511_p[] = { { R511_DRAM_FLOW_CTL, 0xff }, { R51x_SYS_SNAP, 0x00 }, { R51x_SYS_SNAP, 0x02 }, { R51x_SYS_SNAP, 0x00 }, { R511_FIFO_OPTS, 0xff }, { 
R511_COMP_EN, 0x00 }, { R511_COMP_LUT_EN, 0x03 }, }; static const struct ov_regvals compress_511[] = { { 0x70, 0x1f }, { 0x71, 0x05 }, { 0x72, 0x06 }, { 0x73, 0x06 }, { 0x74, 0x14 }, { 0x75, 0x03 }, { 0x76, 0x04 }, { 0x77, 0x04 }, }; gspca_dbg(gspca_dev, D_PROBE, "Device custom id %x\n", reg_r(sd, R51x_SYS_CUST_ID)); write_regvals(sd, init_511, ARRAY_SIZE(init_511)); switch (sd->bridge) { case BRIDGE_OV511: write_regvals(sd, norm_511, ARRAY_SIZE(norm_511)); break; case BRIDGE_OV511PLUS: write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p)); break; } /* Init compression */ write_regvals(sd, compress_511, ARRAY_SIZE(compress_511)); ov51x_upload_quan_tables(sd); } /* This initializes the OV518/OV518+ and the sensor */ static void ov518_configure(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* For 518 and 518+ */ static const struct ov_regvals init_518[] = { { R51x_SYS_RESET, 0x40 }, { R51x_SYS_INIT, 0xe1 }, { R51x_SYS_RESET, 0x3e }, { R51x_SYS_INIT, 0xe1 }, { R51x_SYS_RESET, 0x00 }, { R51x_SYS_INIT, 0xe1 }, { 0x46, 0x00 }, { 0x5d, 0x03 }, }; static const struct ov_regvals norm_518[] = { { R51x_SYS_SNAP, 0x02 }, /* Reset */ { R51x_SYS_SNAP, 0x01 }, /* Enable */ { 0x31, 0x0f }, { 0x5d, 0x03 }, { 0x24, 0x9f }, { 0x25, 0x90 }, { 0x20, 0x00 }, { 0x51, 0x04 }, { 0x71, 0x19 }, { 0x2f, 0x80 }, }; static const struct ov_regvals norm_518_p[] = { { R51x_SYS_SNAP, 0x02 }, /* Reset */ { R51x_SYS_SNAP, 0x01 }, /* Enable */ { 0x31, 0x0f }, { 0x5d, 0x03 }, { 0x24, 0x9f }, { 0x25, 0x90 }, { 0x20, 0x60 }, { 0x51, 0x02 }, { 0x71, 0x19 }, { 0x40, 0xff }, { 0x41, 0x42 }, { 0x46, 0x00 }, { 0x33, 0x04 }, { 0x21, 0x19 }, { 0x3f, 0x10 }, { 0x2f, 0x80 }, }; /* First 5 bits of custom ID reg are a revision ID on OV518 */ sd->revision = reg_r(sd, R51x_SYS_CUST_ID) & 0x1f; gspca_dbg(gspca_dev, D_PROBE, "Device revision %d\n", sd->revision); write_regvals(sd, init_518, ARRAY_SIZE(init_518)); /* Set LED GPIO pin to output mode */ reg_w_mask(sd, R518_GPIO_CTL, 0x00, 0x02); switch (sd->bridge) { case BRIDGE_OV518: write_regvals(sd, norm_518, ARRAY_SIZE(norm_518)); break; case BRIDGE_OV518PLUS: write_regvals(sd, norm_518_p, ARRAY_SIZE(norm_518_p)); break; } ov51x_upload_quan_tables(sd); reg_w(sd, 0x2f, 0x80); } static void ov519_configure(struct sd *sd) { static const struct ov_regvals init_519[] = { { 0x5a, 0x6d }, /* EnableSystem */ { 0x53, 0x9b }, /* don't enable the microcontroller */ { OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */ { 0x5d, 0x03 }, { 0x49, 0x01 }, { 0x48, 0x00 }, /* Set LED pin to output mode. Bit 4 must be cleared or sensor * detection will fail. This deserves further investigation. 
*/ { OV519_GPIO_IO_CTRL0, 0xee }, { OV519_R51_RESET1, 0x0f }, { OV519_R51_RESET1, 0x00 }, { 0x22, 0x00 }, /* windows reads 0x55 at this point*/ }; write_regvals(sd, init_519, ARRAY_SIZE(init_519)); } static void ovfx2_configure(struct sd *sd) { static const struct ov_regvals init_fx2[] = { { 0x00, 0x60 }, { 0x02, 0x01 }, { 0x0f, 0x1d }, { 0xe9, 0x82 }, { 0xea, 0xc7 }, { 0xeb, 0x10 }, { 0xec, 0xf6 }, }; sd->stopped = 1; write_regvals(sd, init_fx2, ARRAY_SIZE(init_fx2)); } /* set the mode */ /* This function works for ov7660 only */ static void ov519_set_mode(struct sd *sd) { static const struct ov_regvals bridge_ov7660[2][10] = { {{0x10, 0x14}, {0x11, 0x1e}, {0x12, 0x00}, {0x13, 0x00}, {0x14, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x20, 0x0c}, {0x25, 0x01}, {0x26, 0x00}}, {{0x10, 0x28}, {0x11, 0x3c}, {0x12, 0x00}, {0x13, 0x00}, {0x14, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x20, 0x0c}, {0x25, 0x03}, {0x26, 0x00}} }; static const struct ov_i2c_regvals sensor_ov7660[2][3] = { {{0x12, 0x00}, {0x24, 0x00}, {0x0c, 0x0c}}, {{0x12, 0x00}, {0x04, 0x00}, {0x0c, 0x00}} }; static const struct ov_i2c_regvals sensor_ov7660_2[] = { {OV7670_R17_HSTART, 0x13}, {OV7670_R18_HSTOP, 0x01}, {OV7670_R32_HREF, 0x92}, {OV7670_R19_VSTART, 0x02}, {OV7670_R1A_VSTOP, 0x7a}, {OV7670_R03_VREF, 0x00}, /* {0x33, 0x00}, */ /* {0x34, 0x07}, */ /* {0x36, 0x00}, */ /* {0x6b, 0x0a}, */ }; write_regvals(sd, bridge_ov7660[sd->gspca_dev.curr_mode], ARRAY_SIZE(bridge_ov7660[0])); write_i2c_regvals(sd, sensor_ov7660[sd->gspca_dev.curr_mode], ARRAY_SIZE(sensor_ov7660[0])); write_i2c_regvals(sd, sensor_ov7660_2, ARRAY_SIZE(sensor_ov7660_2)); } /* set the frame rate */ /* This function works for sensors ov7640, ov7648 ov7660 and ov7670 only */ static void ov519_set_fr(struct sd *sd) { int fr; u8 clock; /* frame rate table with indices: * - mode = 0: 320x240, 1: 640x480 * - fr rate = 0: 30, 1: 25, 2: 20, 3: 15, 4: 10, 5: 5 * - reg = 0: bridge a4, 1: bridge 23, 2: sensor 11 (clock) */ static const u8 fr_tb[2][6][3] = { {{0x04, 0xff, 0x00}, {0x04, 0x1f, 0x00}, {0x04, 0x1b, 0x00}, {0x04, 0x15, 0x00}, {0x04, 0x09, 0x00}, {0x04, 0x01, 0x00}}, {{0x0c, 0xff, 0x00}, {0x0c, 0x1f, 0x00}, {0x0c, 0x1b, 0x00}, {0x04, 0xff, 0x01}, {0x04, 0x1f, 0x01}, {0x04, 0x1b, 0x01}}, }; if (frame_rate > 0) sd->frame_rate = frame_rate; if (sd->frame_rate >= 30) fr = 0; else if (sd->frame_rate >= 25) fr = 1; else if (sd->frame_rate >= 20) fr = 2; else if (sd->frame_rate >= 15) fr = 3; else if (sd->frame_rate >= 10) fr = 4; else fr = 5; reg_w(sd, 0xa4, fr_tb[sd->gspca_dev.curr_mode][fr][0]); reg_w(sd, 0x23, fr_tb[sd->gspca_dev.curr_mode][fr][1]); clock = fr_tb[sd->gspca_dev.curr_mode][fr][2]; if (sd->sensor == SEN_OV7660) clock |= 0x80; /* enable double clock */ ov518_i2c_w(sd, OV7670_R11_CLKRC, clock); } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; i2c_w_mask(sd, 0x13, val ? 
0x05 : 0x00, 0x05); } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; sd->bridge = id->driver_info & BRIDGE_MASK; sd->invert_led = (id->driver_info & BRIDGE_INVERT_LED) != 0; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: cam->cam_mode = ov511_vga_mode; cam->nmodes = ARRAY_SIZE(ov511_vga_mode); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: cam->cam_mode = ov518_vga_mode; cam->nmodes = ARRAY_SIZE(ov518_vga_mode); break; case BRIDGE_OV519: cam->cam_mode = ov519_vga_mode; cam->nmodes = ARRAY_SIZE(ov519_vga_mode); break; case BRIDGE_OVFX2: cam->cam_mode = ov519_vga_mode; cam->nmodes = ARRAY_SIZE(ov519_vga_mode); cam->bulk_size = OVFX2_BULK_SIZE; cam->bulk_nurbs = MAX_NURBS; cam->bulk = 1; break; case BRIDGE_W9968CF: cam->cam_mode = w9968cf_vga_mode; cam->nmodes = ARRAY_SIZE(w9968cf_vga_mode); break; } sd->frame_rate = 15; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: ov511_configure(gspca_dev); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: ov518_configure(gspca_dev); break; case BRIDGE_OV519: ov519_configure(sd); break; case BRIDGE_OVFX2: ovfx2_configure(sd); break; case BRIDGE_W9968CF: w9968cf_configure(sd); break; } /* The OV519 must be more aggressive about sensor detection since * I2C write will never fail if the sensor is not present. We have * to try to initialize the sensor to detect its presence */ sd->sensor = -1; /* Test for 76xx */ if (init_ov_sensor(sd, OV7xx0_SID) >= 0) { ov7xx0_configure(sd); /* Test for 6xx0 */ } else if (init_ov_sensor(sd, OV6xx0_SID) >= 0) { ov6xx0_configure(sd); /* Test for 8xx0 */ } else if (init_ov_sensor(sd, OV8xx0_SID) >= 0) { ov8xx0_configure(sd); /* Test for 3xxx / 2xxx */ } else if (init_ov_sensor(sd, OV_HIRES_SID) >= 0) { ov_hires_configure(sd); } else { gspca_err(gspca_dev, "Can't determine sensor slave IDs\n"); goto error; } if (sd->sensor < 0) goto error; ov51x_led_control(sd, 0); /* turn LED off */ switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: if (sd->sif) { cam->cam_mode = ov511_sif_mode; cam->nmodes = ARRAY_SIZE(ov511_sif_mode); } break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: if (sd->sif) { cam->cam_mode = ov518_sif_mode; cam->nmodes = ARRAY_SIZE(ov518_sif_mode); } break; case BRIDGE_OV519: if (sd->sif) { cam->cam_mode = ov519_sif_mode; cam->nmodes = ARRAY_SIZE(ov519_sif_mode); } break; case BRIDGE_OVFX2: switch (sd->sensor) { case SEN_OV2610: case SEN_OV2610AE: cam->cam_mode = ovfx2_ov2610_mode; cam->nmodes = ARRAY_SIZE(ovfx2_ov2610_mode); break; case SEN_OV3610: cam->cam_mode = ovfx2_ov3610_mode; cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode); break; case SEN_OV9600: cam->cam_mode = ovfx2_ov9600_mode; cam->nmodes = ARRAY_SIZE(ovfx2_ov9600_mode); break; default: if (sd->sif) { cam->cam_mode = ov519_sif_mode; cam->nmodes = ARRAY_SIZE(ov519_sif_mode); } break; } break; case BRIDGE_W9968CF: if (sd->sif) cam->nmodes = ARRAY_SIZE(w9968cf_vga_mode) - 1; /* w9968cf needs initialisation once the sensor is known */ w9968cf_init(sd); break; } /* initialize the sensor */ switch (sd->sensor) { case SEN_OV2610: write_i2c_regvals(sd, norm_2610, ARRAY_SIZE(norm_2610)); /* Enable autogain, autoexpo, awb, bandfilter */ i2c_w_mask(sd, 0x13, 0x27, 0x27); break; 
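/* An assumption based on the OV7670 COM8 bit defines used elsewhere in this
 * file: 0x27 = banding filter (0x20) | AGC (0x04) | AWB (0x02) | AEC (0x01),
 * and passing the same value as the mask means only those four bits are
 * touched. */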
case SEN_OV2610AE: write_i2c_regvals(sd, norm_2610ae, ARRAY_SIZE(norm_2610ae)); /* enable autoexpo */ i2c_w_mask(sd, 0x13, 0x05, 0x05); break; case SEN_OV3610: write_i2c_regvals(sd, norm_3620b, ARRAY_SIZE(norm_3620b)); /* Enable autogain, autoexpo, awb, bandfilter */ i2c_w_mask(sd, 0x13, 0x27, 0x27); break; case SEN_OV6620: write_i2c_regvals(sd, norm_6x20, ARRAY_SIZE(norm_6x20)); break; case SEN_OV6630: case SEN_OV66308AF: write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30)); break; default: /* case SEN_OV7610: */ /* case SEN_OV76BE: */ write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610)); i2c_w_mask(sd, 0x0e, 0x00, 0x40); break; case SEN_OV7620: case SEN_OV7620AE: write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620)); break; case SEN_OV7640: case SEN_OV7648: write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640)); break; case SEN_OV7660: i2c_w(sd, OV7670_R12_COM7, OV7670_COM7_RESET); msleep(14); reg_w(sd, OV519_R57_SNAPSHOT, 0x23); write_regvals(sd, init_519_ov7660, ARRAY_SIZE(init_519_ov7660)); write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660)); sd->gspca_dev.curr_mode = 1; /* 640x480 */ ov519_set_mode(sd); ov519_set_fr(sd); sd_reset_snapshot(gspca_dev); ov51x_restart(sd); ov51x_stop(sd); /* not in win traces */ ov51x_led_control(sd, 0); break; case SEN_OV7670: write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670)); break; case SEN_OV8610: write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610)); break; case SEN_OV9600: write_i2c_regvals(sd, norm_9600, ARRAY_SIZE(norm_9600)); /* enable autoexpo */ /* i2c_w_mask(sd, 0x13, 0x05, 0x05); */ break; } return gspca_dev->usb_err; error: gspca_err(gspca_dev, "OV519 Config failed\n"); return -EINVAL; } /* function called at start time before URB creation */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->bridge) { case BRIDGE_OVFX2: if (gspca_dev->pixfmt.width != 800) gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE; else gspca_dev->cam.bulk_size = 7 * 4096; break; } return 0; } /* Set up the OV511/OV511+ with the given image parameters. * * Do not put any sensor-specific code in here (including I2C I/O functions) */ static void ov511_mode_init_regs(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int hsegs, vsegs, packet_size, fps, needed; int interlaced = 0; struct usb_host_interface *alt; struct usb_interface *intf; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { gspca_err(gspca_dev, "Couldn't get altsetting\n"); sd->gspca_dev.usb_err = -EIO; return; } if (alt->desc.bNumEndpoints < 1) { sd->gspca_dev.usb_err = -ENODEV; return; } packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5); reg_w(sd, R511_CAM_UV_EN, 0x01); reg_w(sd, R511_SNAP_UV_EN, 0x01); reg_w(sd, R511_SNAP_OPTS, 0x03); /* Here I'm assuming that snapshot size == image size. * I hope that's always true. 
--claudio */ hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1; vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1; reg_w(sd, R511_CAM_PXCNT, hsegs); reg_w(sd, R511_CAM_LNCNT, vsegs); reg_w(sd, R511_CAM_PXDIV, 0x00); reg_w(sd, R511_CAM_LNDIV, 0x00); /* YUV420, low pass filter on */ reg_w(sd, R511_CAM_OPTS, 0x03); /* Snapshot additions */ reg_w(sd, R511_SNAP_PXCNT, hsegs); reg_w(sd, R511_SNAP_LNCNT, vsegs); reg_w(sd, R511_SNAP_PXDIV, 0x00); reg_w(sd, R511_SNAP_LNDIV, 0x00); /******** Set the framerate ********/ if (frame_rate > 0) sd->frame_rate = frame_rate; switch (sd->sensor) { case SEN_OV6620: /* No framerate control, doesn't like higher rates yet */ sd->clockdiv = 3; break; /* Note: once the FIXMEs in mode_init_ov_sensor_regs() are fixed for more sensors, we need to do this for them too */ case SEN_OV7620: case SEN_OV7620AE: case SEN_OV7640: case SEN_OV7648: case SEN_OV76BE: if (sd->gspca_dev.pixfmt.width == 320) interlaced = 1; fallthrough; case SEN_OV6630: case SEN_OV7610: case SEN_OV7670: switch (sd->frame_rate) { case 30: case 25: /* Not enough bandwidth to do 640x480 @ 30 fps */ if (sd->gspca_dev.pixfmt.width != 640) { sd->clockdiv = 0; break; } /* For 640x480 case */ fallthrough; default: /* case 20: */ /* case 15: */ sd->clockdiv = 1; break; case 10: sd->clockdiv = 2; break; case 5: sd->clockdiv = 5; break; } if (interlaced) { sd->clockdiv = (sd->clockdiv + 1) * 2 - 1; /* Higher than 10 does not work */ if (sd->clockdiv > 10) sd->clockdiv = 10; } break; case SEN_OV8610: /* No framerate control ?? */ sd->clockdiv = 0; break; } /* Check if we have enough bandwidth to disable compression */ fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1; needed = fps * sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height * 3 / 2; /* 1000 isoc packets/sec */ if (needed > 1000 * packet_size) { /* Enable Y and UV quantization and compression */ reg_w(sd, R511_COMP_EN, 0x07); reg_w(sd, R511_COMP_LUT_EN, 0x03); } else { reg_w(sd, R511_COMP_EN, 0x06); reg_w(sd, R511_COMP_LUT_EN, 0x00); } reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE); reg_w(sd, R51x_SYS_RESET, 0); } /* Sets up the OV518/OV518+ with the given image parameters * * OV518 needs a completely different approach, until we can figure out what * the individual registers do. Also, only 15 FPS is supported now.
* * Do not put any sensor-specific code in here (including I2C I/O functions) */ static void ov518_mode_init_regs(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int hsegs, vsegs, packet_size; struct usb_host_interface *alt; struct usb_interface *intf; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { gspca_err(gspca_dev, "Couldn't get altsetting\n"); sd->gspca_dev.usb_err = -EIO; return; } if (alt->desc.bNumEndpoints < 1) { sd->gspca_dev.usb_err = -ENODEV; return; } packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2); /******** Set the mode ********/ reg_w(sd, 0x2b, 0); reg_w(sd, 0x2c, 0); reg_w(sd, 0x2d, 0); reg_w(sd, 0x2e, 0); reg_w(sd, 0x3b, 0); reg_w(sd, 0x3c, 0); reg_w(sd, 0x3d, 0); reg_w(sd, 0x3e, 0); if (sd->bridge == BRIDGE_OV518) { /* Set 8-bit (YVYU) input format */ reg_w_mask(sd, 0x20, 0x08, 0x08); /* Set 12-bit (4:2:0) output format */ reg_w_mask(sd, 0x28, 0x80, 0xf0); reg_w_mask(sd, 0x38, 0x80, 0xf0); } else { reg_w(sd, 0x28, 0x80); reg_w(sd, 0x38, 0x80); } hsegs = sd->gspca_dev.pixfmt.width / 16; vsegs = sd->gspca_dev.pixfmt.height / 4; reg_w(sd, 0x29, hsegs); reg_w(sd, 0x2a, vsegs); reg_w(sd, 0x39, hsegs); reg_w(sd, 0x3a, vsegs); /* Windows driver does this here; who knows why */ reg_w(sd, 0x2f, 0x80); /******** Set the framerate ********/ if (sd->bridge == BRIDGE_OV518PLUS && sd->revision == 0 && sd->sensor == SEN_OV7620AE) sd->clockdiv = 0; else sd->clockdiv = 1; /* Mode independent, but framerate dependent, regs */ /* 0x51: Clock divider; Only works on some cams which use 2 crystals */ reg_w(sd, 0x51, 0x04); reg_w(sd, 0x22, 0x18); reg_w(sd, 0x23, 0xff); if (sd->bridge == BRIDGE_OV518PLUS) { switch (sd->sensor) { case SEN_OV7620AE: /* * HdG: 640x480 needs special handling on device * revision 2, we check for device revision > 0 to * avoid regressions, as we don't know the correct * thing to do for revision 1. * * Also, this likely means we don't need to * differentiate between the OV7620 and OV7620AE; * the earlier testing that hit this same problem likely * happened to be with revision < 2 cams using an * OV7620 and revision 2 cams using an OV7620AE. */ if (sd->revision > 0 && sd->gspca_dev.pixfmt.width == 640) { reg_w(sd, 0x20, 0x60); reg_w(sd, 0x21, 0x1f); } else { reg_w(sd, 0x20, 0x00); reg_w(sd, 0x21, 0x19); } break; case SEN_OV7620: reg_w(sd, 0x20, 0x00); reg_w(sd, 0x21, 0x19); break; default: reg_w(sd, 0x21, 0x19); } } else reg_w(sd, 0x71, 0x17); /* Compression-related? */ /* FIXME: Sensor-specific */ /* Bit 5 is what matters here.
Of course, it is "reserved" */ i2c_w(sd, 0x54, 0x23); reg_w(sd, 0x2f, 0x80); if (sd->bridge == BRIDGE_OV518PLUS) { reg_w(sd, 0x24, 0x94); reg_w(sd, 0x25, 0x90); ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */ ov518_reg_w32(sd, 0xc6, 540, 2); /* 21ch */ ov518_reg_w32(sd, 0xc7, 540, 2); /* 21ch */ ov518_reg_w32(sd, 0xc8, 108, 2); /* 6ch */ ov518_reg_w32(sd, 0xca, 131098, 3); /* 2001ah */ ov518_reg_w32(sd, 0xcb, 532, 2); /* 214h */ ov518_reg_w32(sd, 0xcc, 2400, 2); /* 960h */ ov518_reg_w32(sd, 0xcd, 32, 2); /* 20h */ ov518_reg_w32(sd, 0xce, 608, 2); /* 260h */ } else { reg_w(sd, 0x24, 0x9f); reg_w(sd, 0x25, 0x90); ov518_reg_w32(sd, 0xc4, 400, 2); /* 190h */ ov518_reg_w32(sd, 0xc6, 381, 2); /* 17dh */ ov518_reg_w32(sd, 0xc7, 381, 2); /* 17dh */ ov518_reg_w32(sd, 0xc8, 128, 2); /* 80h */ ov518_reg_w32(sd, 0xca, 183331, 3); /* 2cc23h */ ov518_reg_w32(sd, 0xcb, 746, 2); /* 2eah */ ov518_reg_w32(sd, 0xcc, 1750, 2); /* 6d6h */ ov518_reg_w32(sd, 0xcd, 45, 2); /* 2dh */ ov518_reg_w32(sd, 0xce, 851, 2); /* 353h */ } reg_w(sd, 0x2f, 0x80); } /* Sets up the OV519 with the given image parameters * * OV519 needs a completely different approach, until we can figure out what * the individual registers do. * * Do not put any sensor-specific code in here (including I2C I/O functions) */ static void ov519_mode_init_regs(struct sd *sd) { static const struct ov_regvals mode_init_519_ov7670[] = { { 0x5d, 0x03 }, /* Turn off suspend mode */ { 0x53, 0x9f }, /* was 9b in 1.65-1.08 */ { OV519_R54_EN_CLK1, 0x0f }, /* bit2 (jpeg enable) */ { 0xa2, 0x20 }, /* a2-a5 are undocumented */ { 0xa3, 0x18 }, { 0xa4, 0x04 }, { 0xa5, 0x28 }, { 0x37, 0x00 }, /* SetUsbInit */ { 0x55, 0x02 }, /* 4.096 Mhz audio clock */ /* Enable both fields, YUV Input, disable defect comp (why?) */ { 0x20, 0x0c }, { 0x21, 0x38 }, { 0x22, 0x1d }, { 0x17, 0x50 }, /* undocumented */ { 0x37, 0x00 }, /* undocumented */ { 0x40, 0xff }, /* I2C timeout counter */ { 0x46, 0x00 }, /* I2C clock prescaler */ { 0x59, 0x04 }, /* new from windrv 090403 */ { 0xff, 0x00 }, /* undocumented */ /* windows reads 0x55 at this point, why? */ }; static const struct ov_regvals mode_init_519[] = { { 0x5d, 0x03 }, /* Turn off suspend mode */ { 0x53, 0x9f }, /* was 9b in 1.65-1.08 */ { OV519_R54_EN_CLK1, 0x0f }, /* bit2 (jpeg enable) */ { 0xa2, 0x20 }, /* a2-a5 are undocumented */ { 0xa3, 0x18 }, { 0xa4, 0x04 }, { 0xa5, 0x28 }, { 0x37, 0x00 }, /* SetUsbInit */ { 0x55, 0x02 }, /* 4.096 Mhz audio clock */ /* Enable both fields, YUV Input, disable defect comp (why?) */ { 0x22, 0x1d }, { 0x17, 0x50 }, /* undocumented */ { 0x37, 0x00 }, /* undocumented */ { 0x40, 0xff }, /* I2C timeout counter */ { 0x46, 0x00 }, /* I2C clock prescaler */ { 0x59, 0x04 }, /* new from windrv 090403 */ { 0xff, 0x00 }, /* undocumented */ /* windows reads 0x55 at this point, why? 
*/ }; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; /******** Set the mode ********/ switch (sd->sensor) { default: write_regvals(sd, mode_init_519, ARRAY_SIZE(mode_init_519)); if (sd->sensor == SEN_OV7640 || sd->sensor == SEN_OV7648) { /* Select 8-bit input mode */ reg_w_mask(sd, OV519_R20_DFR, 0x10, 0x10); } break; case SEN_OV7660: return; /* done by ov519_set_mode/fr() */ case SEN_OV7670: write_regvals(sd, mode_init_519_ov7670, ARRAY_SIZE(mode_init_519_ov7670)); break; } reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4); reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3); if (sd->sensor == SEN_OV7670 && sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) reg_w(sd, OV519_R12_X_OFFSETL, 0x04); else if (sd->sensor == SEN_OV7648 && sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv) reg_w(sd, OV519_R12_X_OFFSETL, 0x01); else reg_w(sd, OV519_R12_X_OFFSETL, 0x00); reg_w(sd, OV519_R13_X_OFFSETH, 0x00); reg_w(sd, OV519_R14_Y_OFFSETL, 0x00); reg_w(sd, OV519_R15_Y_OFFSETH, 0x00); reg_w(sd, OV519_R16_DIVIDER, 0x00); reg_w(sd, OV519_R25_FORMAT, 0x03); /* YUV422 */ reg_w(sd, 0x26, 0x00); /* Undocumented */ /******** Set the framerate ********/ if (frame_rate > 0) sd->frame_rate = frame_rate; /* FIXME: These are only valid at the max resolution. */ sd->clockdiv = 0; switch (sd->sensor) { case SEN_OV7640: case SEN_OV7648: switch (sd->frame_rate) { default: /* case 30: */ reg_w(sd, 0xa4, 0x0c); reg_w(sd, 0x23, 0xff); break; case 25: reg_w(sd, 0xa4, 0x0c); reg_w(sd, 0x23, 0x1f); break; case 20: reg_w(sd, 0xa4, 0x0c); reg_w(sd, 0x23, 0x1b); break; case 15: reg_w(sd, 0xa4, 0x04); reg_w(sd, 0x23, 0xff); sd->clockdiv = 1; break; case 10: reg_w(sd, 0xa4, 0x04); reg_w(sd, 0x23, 0x1f); sd->clockdiv = 1; break; case 5: reg_w(sd, 0xa4, 0x04); reg_w(sd, 0x23, 0x1b); sd->clockdiv = 1; break; } break; case SEN_OV8610: switch (sd->frame_rate) { default: /* 15 fps */ /* case 15: */ reg_w(sd, 0xa4, 0x06); reg_w(sd, 0x23, 0xff); break; case 10: reg_w(sd, 0xa4, 0x06); reg_w(sd, 0x23, 0x1f); break; case 5: reg_w(sd, 0xa4, 0x06); reg_w(sd, 0x23, 0x1b); break; } break; case SEN_OV7670: /* guesses, based on 7640 */ gspca_dbg(gspca_dev, D_STREAM, "Setting framerate to %d fps\n", (sd->frame_rate == 0) ? 15 : sd->frame_rate); reg_w(sd, 0xa4, 0x10); switch (sd->frame_rate) { case 30: reg_w(sd, 0x23, 0xff); break; case 20: reg_w(sd, 0x23, 0x1b); break; default: /* case 15: */ reg_w(sd, 0x23, 0xff); sd->clockdiv = 1; break; } break; } } static void mode_init_ov_sensor_regs(struct sd *sd) { struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; int qvga, xstart, xend, ystart, yend; u8 v; qvga = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 1; /******** Mode (VGA/QVGA) and sensor specific regs ********/ switch (sd->sensor) { case SEN_OV2610: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); return; case SEN_OV2610AE: { u8 v; /* frame rates: * 10fps / 5 fps for 1600x1200 * 40fps / 20fps for 800x600 */ v = 80; if (qvga) { if (sd->frame_rate < 25) v = 0x81; } else { if (sd->frame_rate < 10) v = 0x81; } i2c_w(sd, 0x11, v); i2c_w(sd, 0x12, qvga ? 
0x60 : 0x20); return; } case SEN_OV3610: if (qvga) { xstart = (1040 - gspca_dev->pixfmt.width) / 2 + (0x1f << 4); ystart = (776 - gspca_dev->pixfmt.height) / 2; } else { xstart = (2076 - gspca_dev->pixfmt.width) / 2 + (0x10 << 4); ystart = (1544 - gspca_dev->pixfmt.height) / 2; } xend = xstart + gspca_dev->pixfmt.width; yend = ystart + gspca_dev->pixfmt.height; /* Writing to the COMH register resets the other windowing regs to their default values, so we must do this first. */ i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0); i2c_w_mask(sd, 0x32, (((xend >> 1) & 7) << 3) | ((xstart >> 1) & 7), 0x3f); i2c_w_mask(sd, 0x03, (((yend >> 1) & 3) << 2) | ((ystart >> 1) & 3), 0x0f); i2c_w(sd, 0x17, xstart >> 4); i2c_w(sd, 0x18, xend >> 4); i2c_w(sd, 0x19, ystart >> 3); i2c_w(sd, 0x1a, yend >> 3); return; case SEN_OV8610: /* For OV8610 qvga means qsvga */ i2c_w_mask(sd, OV7610_REG_COM_C, qvga ? (1 << 5) : 0, 1 << 5); i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ i2c_w_mask(sd, 0x2d, 0x00, 0x40); /* from windrv 090403 */ i2c_w_mask(sd, 0x28, 0x20, 0x20); /* progressive mode on */ break; case SEN_OV7610: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e); i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ break; case SEN_OV7620: case SEN_OV7620AE: case SEN_OV76BE: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a); i2c_w(sd, 0x25, qvga ? 0x30 : 0x60); i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0); i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ if (sd->sensor == SEN_OV76BE) i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e); break; case SEN_OV7640: case SEN_OV7648: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20); /* Setting this undocumented bit in qvga mode removes a very annoying vertical shaking of the image */ i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40); /* Unknown */ i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0); /* Allow higher automatic gain (to allow higher framerates) */ i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x12, 0x04, 0x04); /* AWB: 1 */ break; case SEN_OV7670: /* set COM7_FMT_VGA or COM7_FMT_QVGA * do we need to set anything else? * HSTART etc are set in set_ov_sensor_window itself */ i2c_w_mask(sd, OV7670_R12_COM7, qvga ? OV7670_COM7_FMT_QVGA : OV7670_COM7_FMT_VGA, OV7670_COM7_FMT_MASK); i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_AWB, OV7670_COM8_AWB); if (qvga) { /* QVGA from ov7670.c by * Jonathan Corbet */ xstart = 164; xend = 28; ystart = 14; yend = 494; } else { /* VGA */ xstart = 158; xend = 14; ystart = 10; yend = 490; } /* OV7670 hardware window registers are split across * multiple locations */ i2c_w(sd, OV7670_R17_HSTART, xstart >> 3); i2c_w(sd, OV7670_R18_HSTOP, xend >> 3); v = i2c_r(sd, OV7670_R32_HREF); v = (v & 0xc0) | ((xend & 0x7) << 3) | (xstart & 0x07); msleep(10); /* need to sleep between read and write to * same reg! 
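* (A worked example of the packing above, for the VGA window: xstart = 158
* and xend = 14 give low bits 158 & 7 = 6 and 14 & 7 = 6, so v becomes
* (v & 0xc0) | 0x36 before being written back.)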
*/ i2c_w(sd, OV7670_R32_HREF, v); i2c_w(sd, OV7670_R19_VSTART, ystart >> 2); i2c_w(sd, OV7670_R1A_VSTOP, yend >> 2); v = i2c_r(sd, OV7670_R03_VREF); v = (v & 0xc0) | ((yend & 0x3) << 2) | (ystart & 0x03); msleep(10); /* need to sleep between read and write to * same reg! */ i2c_w(sd, OV7670_R03_VREF, v); break; case SEN_OV6620: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */ i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ break; case SEN_OV6630: case SEN_OV66308AF: i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20); i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */ break; case SEN_OV9600: { const struct ov_i2c_regvals *vals; static const struct ov_i2c_regvals sxga_15[] = { {0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75} }; static const struct ov_i2c_regvals sxga_7_5[] = { {0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75} }; static const struct ov_i2c_regvals vga_30[] = { {0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60} }; static const struct ov_i2c_regvals vga_15[] = { {0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70} }; /* frame rates: * 15fps / 7.5 fps for 1280x1024 * 30fps / 15fps for 640x480 */ i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40); if (qvga) vals = sd->frame_rate < 30 ? vga_15 : vga_30; else vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15; write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15)); return; } default: return; } /******** Clock programming ********/ i2c_w(sd, 0x11, sd->clockdiv); } /* this function works for bridge ov519 and sensors ov7660 and ov7670 only */ static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip) { struct sd *sd = (struct sd *) gspca_dev; if (sd->gspca_dev.streaming) reg_w(sd, OV519_R51_RESET1, 0x0f); /* block stream */ i2c_w_mask(sd, OV7670_R1E_MVFP, OV7670_MVFP_MIRROR * hflip | OV7670_MVFP_VFLIP * vflip, OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP); if (sd->gspca_dev.streaming) reg_w(sd, OV519_R51_RESET1, 0x00); /* restart stream */ } static void set_ov_sensor_window(struct sd *sd) { struct gspca_dev *gspca_dev; int qvga, crop; int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale; /* mode setup is fully handled in mode_init_ov_sensor_regs for these */ switch (sd->sensor) { case SEN_OV2610: case SEN_OV2610AE: case SEN_OV3610: case SEN_OV7670: case SEN_OV9600: mode_init_ov_sensor_regs(sd); return; case SEN_OV7660: ov519_set_mode(sd); ov519_set_fr(sd); return; } gspca_dev = &sd->gspca_dev; qvga = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 1; crop = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 2; /* The different sensor ICs handle setting up of window differently. * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! 
*/ switch (sd->sensor) { case SEN_OV8610: hwsbase = 0x1e; hwebase = 0x1e; vwsbase = 0x02; vwebase = 0x02; break; case SEN_OV7610: case SEN_OV76BE: hwsbase = 0x38; hwebase = 0x3a; vwsbase = vwebase = 0x05; break; case SEN_OV6620: case SEN_OV6630: case SEN_OV66308AF: hwsbase = 0x38; hwebase = 0x3a; vwsbase = 0x05; vwebase = 0x06; if (sd->sensor == SEN_OV66308AF && qvga) /* HDG: this fixes U and V getting swapped */ hwsbase++; if (crop) { hwsbase += 8; hwebase += 8; vwsbase += 11; vwebase += 11; } break; case SEN_OV7620: case SEN_OV7620AE: hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */ hwebase = 0x2f; vwsbase = vwebase = 0x05; break; case SEN_OV7640: case SEN_OV7648: hwsbase = 0x1a; hwebase = 0x1a; vwsbase = vwebase = 0x03; break; default: return; } switch (sd->sensor) { case SEN_OV6620: case SEN_OV6630: case SEN_OV66308AF: if (qvga) { /* QCIF */ hwscale = 0; vwscale = 0; } else { /* CIF */ hwscale = 1; vwscale = 1; /* The datasheet says 0; * it's wrong */ } break; case SEN_OV8610: if (qvga) { /* QSVGA */ hwscale = 1; vwscale = 1; } else { /* SVGA */ hwscale = 2; vwscale = 2; } break; default: /* SEN_OV7xx0 */ if (qvga) { /* QVGA */ hwscale = 1; vwscale = 0; } else { /* VGA */ hwscale = 2; vwscale = 1; } } mode_init_ov_sensor_regs(sd); i2c_w(sd, 0x17, hwsbase); i2c_w(sd, 0x18, hwebase + (sd->sensor_width >> hwscale)); i2c_w(sd, 0x19, vwsbase); i2c_w(sd, 0x1a, vwebase + (sd->sensor_height >> vwscale)); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; /* Default for most bridges, allow bridge_mode_init_regs to override */ sd->sensor_width = sd->gspca_dev.pixfmt.width; sd->sensor_height = sd->gspca_dev.pixfmt.height; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: ov511_mode_init_regs(sd); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: ov518_mode_init_regs(sd); break; case BRIDGE_OV519: ov519_mode_init_regs(sd); break; /* case BRIDGE_OVFX2: nothing to do */ case BRIDGE_W9968CF: w9968cf_mode_init_regs(sd); break; } set_ov_sensor_window(sd); /* Force clear snapshot state in case the snapshot button was pressed while we weren't streaming */ sd->snapshot_needs_reset = 1; sd_reset_snapshot(gspca_dev); sd->first_frame = 3; ov51x_restart(sd); ov51x_led_control(sd, 1); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; ov51x_stop(sd); ov51x_led_control(sd, 0); } static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (!sd->gspca_dev.present) return; if (sd->bridge == BRIDGE_W9968CF) w9968cf_stop0(sd); #if IS_ENABLED(CONFIG_INPUT) /* If the last button state is pressed, release it now! 
*/ if (sd->snapshot_pressed) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); sd->snapshot_pressed = 0; } #endif if (sd->bridge == BRIDGE_OV519) reg_w(sd, OV519_R57_SNAPSHOT, 0x23); } static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state) { struct sd *sd = (struct sd *) gspca_dev; if (sd->snapshot_pressed != state) { #if IS_ENABLED(CONFIG_INPUT) input_report_key(gspca_dev->input_dev, KEY_CAMERA, state); input_sync(gspca_dev->input_dev); #endif if (state) sd->snapshot_needs_reset = 1; sd->snapshot_pressed = state; } else { /* On the ov511 / ov519 we need to reset the button state multiple times, as resetting does not work as long as the button stays pressed */ switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: case BRIDGE_OV519: if (state) sd->snapshot_needs_reset = 1; break; } } } static void ov511_pkt_scan(struct gspca_dev *gspca_dev, u8 *in, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; /* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th * byte non-zero. The EOF packet has image width/height in the * 10th and 11th bytes. The 9th byte is given as follows: * * bit 7: EOF * 6: compression enabled * 5: 422/420/400 modes * 4: 422/420/400 modes * 3: 1 * 2: snapshot button on * 1: snapshot frame * 0: even/odd field */ if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) && (in[8] & 0x08)) { ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1); if (in[8] & 0x80) { /* Frame end */ if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width || (in[10] + 1) * 8 != gspca_dev->pixfmt.height) { gspca_err(gspca_dev, "Invalid frame size, got: %dx%d, requested: %dx%d\n", (in[9] + 1) * 8, (in[10] + 1) * 8, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height); gspca_dev->last_packet_type = DISCARD_PACKET; return; } /* Add 11 byte footer to frame, might be useful */ gspca_frame_add(gspca_dev, LAST_PACKET, in, 11); return; } else { /* Frame start */ gspca_frame_add(gspca_dev, FIRST_PACKET, in, 0); sd->packet_nr = 0; } } /* Ignore the packet number */ len--; /* intermediate packet */ gspca_frame_add(gspca_dev, INTER_PACKET, in, len); } static void ov518_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; /* A false positive here is likely, until OVT gives me * the definitive SOF/EOF format */ if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) { ov51x_handle_button(gspca_dev, (data[6] >> 1) & 1); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); sd->packet_nr = 0; } if (gspca_dev->last_packet_type == DISCARD_PACKET) return; /* Does this device use packet numbers ? 
*/ if (len & 7) { len--; if (sd->packet_nr == data[len]) sd->packet_nr++; /* The last few packets of the frame (which are all 0's except that they may contain part of the footer), are numbered 0 */ else if (sd->packet_nr == 0 || data[len]) { gspca_err(gspca_dev, "Invalid packet nr: %d (expect: %d)\n", (int)data[len], (int)sd->packet_nr); gspca_dev->last_packet_type = DISCARD_PACKET; return; } } /* intermediate packet */ gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static void ov519_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { /* Header of ov519 is 16 bytes: * Byte Value Description * 0 0xff magic * 1 0xff magic * 2 0xff magic * 3 0xXX 0x50 = SOF, 0x51 = EOF * 9 0xXX 0x01 initial frame without data, * 0x00 standard frame with image * 14 Lo in EOF: length of image data / 8 * 15 Hi */ if (data[0] == 0xff && data[1] == 0xff && data[2] == 0xff) { switch (data[3]) { case 0x50: /* start of frame */ /* Don't check the button state here, as the state usually (always ?) changes at EOF and checking it here leads to unnecessary snapshot state resets. */ #define HDRSZ 16 data += HDRSZ; len -= HDRSZ; #undef HDRSZ if (data[0] == 0xff || data[1] == 0xd8) gspca_frame_add(gspca_dev, FIRST_PACKET, data, len); else gspca_dev->last_packet_type = DISCARD_PACKET; return; case 0x51: /* end of frame */ ov51x_handle_button(gspca_dev, data[11] & 1); if (data[9] != 0) gspca_dev->last_packet_type = DISCARD_PACKET; gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); return; } } /* intermediate packet */ gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; gspca_frame_add(gspca_dev, INTER_PACKET, data, len); /* A short read signals EOF */ if (len < gspca_dev->cam.bulk_size) { /* If the frame is short, and it is one of the first ones the sensor and bridge are still syncing, so drop it. 
*/ if (sd->first_frame) { sd->first_frame--; if (gspca_dev->image_len < sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height) gspca_dev->last_packet_type = DISCARD_PACKET; } gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); } } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; switch (sd->bridge) { case BRIDGE_OV511: case BRIDGE_OV511PLUS: ov511_pkt_scan(gspca_dev, data, len); break; case BRIDGE_OV518: case BRIDGE_OV518PLUS: ov518_pkt_scan(gspca_dev, data, len); break; case BRIDGE_OV519: ov519_pkt_scan(gspca_dev, data, len); break; case BRIDGE_OVFX2: ovfx2_pkt_scan(gspca_dev, data, len); break; case BRIDGE_W9968CF: w9968cf_pkt_scan(gspca_dev, data, len); break; } } /* -- management routines -- */ static void setbrightness(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; static const struct ov_i2c_regvals brit_7660[][7] = { {{0x0f, 0x6a}, {0x24, 0x40}, {0x25, 0x2b}, {0x26, 0x90}, {0x27, 0xe0}, {0x28, 0xe0}, {0x2c, 0xe0}}, {{0x0f, 0x6a}, {0x24, 0x50}, {0x25, 0x40}, {0x26, 0xa1}, {0x27, 0xc0}, {0x28, 0xc0}, {0x2c, 0xc0}}, {{0x0f, 0x6a}, {0x24, 0x68}, {0x25, 0x58}, {0x26, 0xc2}, {0x27, 0xa0}, {0x28, 0xa0}, {0x2c, 0xa0}}, {{0x0f, 0x6a}, {0x24, 0x70}, {0x25, 0x68}, {0x26, 0xd3}, {0x27, 0x80}, {0x28, 0x80}, {0x2c, 0x80}}, {{0x0f, 0x6a}, {0x24, 0x80}, {0x25, 0x70}, {0x26, 0xd3}, {0x27, 0x20}, {0x28, 0x20}, {0x2c, 0x20}}, {{0x0f, 0x6a}, {0x24, 0x88}, {0x25, 0x78}, {0x26, 0xd3}, {0x27, 0x40}, {0x28, 0x40}, {0x2c, 0x40}}, {{0x0f, 0x6a}, {0x24, 0x90}, {0x25, 0x80}, {0x26, 0xd4}, {0x27, 0x60}, {0x28, 0x60}, {0x2c, 0x60}} }; switch (sd->sensor) { case SEN_OV8610: case SEN_OV7610: case SEN_OV76BE: case SEN_OV6620: case SEN_OV6630: case SEN_OV66308AF: case SEN_OV7640: case SEN_OV7648: i2c_w(sd, OV7610_REG_BRT, val); break; case SEN_OV7620: case SEN_OV7620AE: i2c_w(sd, OV7610_REG_BRT, val); break; case SEN_OV7660: write_i2c_regvals(sd, brit_7660[val], ARRAY_SIZE(brit_7660[0])); break; case SEN_OV7670: /*win trace * i2c_w_mask(sd, OV7670_R13_COM8, 0, OV7670_COM8_AEC); */ i2c_w(sd, OV7670_R55_BRIGHT, ov7670_abs_to_sm(val)); break; } } static void setcontrast(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; static const struct ov_i2c_regvals contrast_7660[][31] = { {{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf8}, {0x6f, 0xa0}, {0x70, 0x58}, {0x71, 0x38}, {0x72, 0x30}, {0x73, 0x30}, {0x74, 0x28}, {0x75, 0x28}, {0x76, 0x24}, {0x77, 0x24}, {0x78, 0x22}, {0x79, 0x28}, {0x7a, 0x2a}, {0x7b, 0x34}, {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3d}, {0x7f, 0x65}, {0x80, 0x70}, {0x81, 0x77}, {0x82, 0x7d}, {0x83, 0x83}, {0x84, 0x88}, {0x85, 0x8d}, {0x86, 0x96}, {0x87, 0x9f}, {0x88, 0xb0}, {0x89, 0xc4}, {0x8a, 0xd9}}, {{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf8}, {0x6f, 0x94}, {0x70, 0x58}, {0x71, 0x40}, {0x72, 0x30}, {0x73, 0x30}, {0x74, 0x30}, {0x75, 0x30}, {0x76, 0x2c}, {0x77, 0x24}, {0x78, 0x22}, {0x79, 0x28}, {0x7a, 0x2a}, {0x7b, 0x31}, {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3d}, {0x7f, 0x62}, {0x80, 0x6d}, {0x81, 0x75}, {0x82, 0x7b}, {0x83, 0x81}, {0x84, 0x87}, {0x85, 0x8d}, {0x86, 0x98}, {0x87, 0xa1}, {0x88, 0xb2}, {0x89, 0xc6}, {0x8a, 0xdb}}, {{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf0}, {0x6f, 0x84}, {0x70, 0x58}, {0x71, 0x48}, {0x72, 0x40}, {0x73, 0x40}, {0x74, 0x28}, {0x75, 0x28}, {0x76, 0x28}, {0x77, 0x24}, {0x78, 0x26}, {0x79, 0x28}, {0x7a, 0x28}, {0x7b, 0x34}, {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3c}, 
{0x7f, 0x5d}, {0x80, 0x68}, {0x81, 0x71}, {0x82, 0x79}, {0x83, 0x81}, {0x84, 0x86}, {0x85, 0x8b}, {0x86, 0x95}, {0x87, 0x9e}, {0x88, 0xb1}, {0x89, 0xc5}, {0x8a, 0xd9}}, {{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf0}, {0x6f, 0x70}, {0x70, 0x58}, {0x71, 0x58}, {0x72, 0x48}, {0x73, 0x48}, {0x74, 0x38}, {0x75, 0x40}, {0x76, 0x34}, {0x77, 0x34}, {0x78, 0x2e}, {0x79, 0x28}, {0x7a, 0x24}, {0x7b, 0x22}, {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3c}, {0x7f, 0x58}, {0x80, 0x63}, {0x81, 0x6e}, {0x82, 0x77}, {0x83, 0x80}, {0x84, 0x87}, {0x85, 0x8f}, {0x86, 0x9c}, {0x87, 0xa9}, {0x88, 0xc0}, {0x89, 0xd4}, {0x8a, 0xe6}}, {{0x6c, 0xa0}, {0x6d, 0xf0}, {0x6e, 0x90}, {0x6f, 0x80}, {0x70, 0x70}, {0x71, 0x80}, {0x72, 0x60}, {0x73, 0x60}, {0x74, 0x58}, {0x75, 0x60}, {0x76, 0x4c}, {0x77, 0x38}, {0x78, 0x38}, {0x79, 0x2a}, {0x7a, 0x20}, {0x7b, 0x0e}, {0x7c, 0x0a}, {0x7d, 0x14}, {0x7e, 0x26}, {0x7f, 0x46}, {0x80, 0x54}, {0x81, 0x64}, {0x82, 0x70}, {0x83, 0x7c}, {0x84, 0x87}, {0x85, 0x93}, {0x86, 0xa6}, {0x87, 0xb4}, {0x88, 0xd0}, {0x89, 0xe5}, {0x8a, 0xf5}}, {{0x6c, 0x60}, {0x6d, 0x80}, {0x6e, 0x60}, {0x6f, 0x80}, {0x70, 0x80}, {0x71, 0x80}, {0x72, 0x88}, {0x73, 0x30}, {0x74, 0x70}, {0x75, 0x68}, {0x76, 0x64}, {0x77, 0x50}, {0x78, 0x3c}, {0x79, 0x22}, {0x7a, 0x10}, {0x7b, 0x08}, {0x7c, 0x06}, {0x7d, 0x0e}, {0x7e, 0x1a}, {0x7f, 0x3a}, {0x80, 0x4a}, {0x81, 0x5a}, {0x82, 0x6b}, {0x83, 0x7b}, {0x84, 0x89}, {0x85, 0x96}, {0x86, 0xaf}, {0x87, 0xc3}, {0x88, 0xe1}, {0x89, 0xf2}, {0x8a, 0xfa}}, {{0x6c, 0x20}, {0x6d, 0x40}, {0x6e, 0x20}, {0x6f, 0x60}, {0x70, 0x88}, {0x71, 0xc8}, {0x72, 0xc0}, {0x73, 0xb8}, {0x74, 0xa8}, {0x75, 0xb8}, {0x76, 0x80}, {0x77, 0x5c}, {0x78, 0x26}, {0x79, 0x10}, {0x7a, 0x08}, {0x7b, 0x04}, {0x7c, 0x02}, {0x7d, 0x06}, {0x7e, 0x0a}, {0x7f, 0x22}, {0x80, 0x33}, {0x81, 0x4c}, {0x82, 0x64}, {0x83, 0x7b}, {0x84, 0x90}, {0x85, 0xa7}, {0x86, 0xc7}, {0x87, 0xde}, {0x88, 0xf1}, {0x89, 0xf9}, {0x8a, 0xfd}}, }; switch (sd->sensor) { case SEN_OV7610: case SEN_OV6620: i2c_w(sd, OV7610_REG_CNT, val); break; case SEN_OV6630: case SEN_OV66308AF: i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f); break; case SEN_OV8610: { static const u8 ctab[] = { 0x03, 0x09, 0x0b, 0x0f, 0x53, 0x6f, 0x35, 0x7f }; /* Use Y gamma control instead. Bit 0 enables it. */ i2c_w(sd, 0x64, ctab[val >> 5]); break; } case SEN_OV7620: case SEN_OV7620AE: { static const u8 ctab[] = { 0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57, 0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff }; /* Use Y gamma control instead. Bit 0 enables it. 
*/ i2c_w(sd, 0x64, ctab[val >> 4]); break; } case SEN_OV7660: write_i2c_regvals(sd, contrast_7660[val], ARRAY_SIZE(contrast_7660[0])); break; case SEN_OV7670: /* check that this isn't just the same as ov7610 */ i2c_w(sd, OV7670_R56_CONTRAS, val >> 1); break; } } static void setexposure(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; i2c_w(sd, 0x10, val); } static void setcolors(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; static const struct ov_i2c_regvals colors_7660[][6] = { {{0x4f, 0x28}, {0x50, 0x2a}, {0x51, 0x02}, {0x52, 0x0a}, {0x53, 0x19}, {0x54, 0x23}}, {{0x4f, 0x47}, {0x50, 0x4a}, {0x51, 0x03}, {0x52, 0x11}, {0x53, 0x2c}, {0x54, 0x3e}}, {{0x4f, 0x66}, {0x50, 0x6b}, {0x51, 0x05}, {0x52, 0x19}, {0x53, 0x40}, {0x54, 0x59}}, {{0x4f, 0x84}, {0x50, 0x8b}, {0x51, 0x06}, {0x52, 0x20}, {0x53, 0x53}, {0x54, 0x73}}, {{0x4f, 0xa3}, {0x50, 0xab}, {0x51, 0x08}, {0x52, 0x28}, {0x53, 0x66}, {0x54, 0x8e}}, }; switch (sd->sensor) { case SEN_OV8610: case SEN_OV7610: case SEN_OV76BE: case SEN_OV6620: case SEN_OV6630: case SEN_OV66308AF: i2c_w(sd, OV7610_REG_SAT, val); break; case SEN_OV7620: case SEN_OV7620AE: /* Use UV gamma control instead. Bits 0 & 7 are reserved. */ /* rc = ov_i2c_write(sd->dev, 0x62, (val >> 9) & 0x7e); if (rc < 0) goto out; */ i2c_w(sd, OV7610_REG_SAT, val); break; case SEN_OV7640: case SEN_OV7648: i2c_w(sd, OV7610_REG_SAT, val & 0xf0); break; case SEN_OV7660: write_i2c_regvals(sd, colors_7660[val], ARRAY_SIZE(colors_7660[0])); break; case SEN_OV7670: /* supported later once I work out how to do it * transparently fail now! */ /* set REG_COM13 values for UV sat auto mode */ break; } } static void setautobright(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; i2c_w_mask(sd, 0x2d, val ? 0x10 : 0x00, 0x10); } static void setfreq_i(struct sd *sd, s32 val) { if (sd->sensor == SEN_OV7660 || sd->sensor == SEN_OV7670) { switch (val) { case 0: /* Banding filter disabled */ i2c_w_mask(sd, OV7670_R13_COM8, 0, OV7670_COM8_BFILT); break; case 1: /* 50 hz */ i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT, OV7670_COM8_BFILT); i2c_w_mask(sd, OV7670_R3B_COM11, 0x08, 0x18); break; case 2: /* 60 hz */ i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT, OV7670_COM8_BFILT); i2c_w_mask(sd, OV7670_R3B_COM11, 0x00, 0x18); break; case 3: /* Auto hz - ov7670 only */ i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT, OV7670_COM8_BFILT); i2c_w_mask(sd, OV7670_R3B_COM11, OV7670_COM11_HZAUTO, 0x18); break; } } else { switch (val) { case 0: /* Banding filter disabled */ i2c_w_mask(sd, 0x2d, 0x00, 0x04); i2c_w_mask(sd, 0x2a, 0x00, 0x80); break; case 1: /* 50 hz (filter on and framerate adj) */ i2c_w_mask(sd, 0x2d, 0x04, 0x04); i2c_w_mask(sd, 0x2a, 0x80, 0x80); /* 20 fps -> 16.667 fps */ if (sd->sensor == SEN_OV6620 || sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF) i2c_w(sd, 0x2b, 0x5e); else i2c_w(sd, 0x2b, 0xac); break; case 2: /* 60 hz (filter on, ...) */ i2c_w_mask(sd, 0x2d, 0x04, 0x04); if (sd->sensor == SEN_OV6620 || sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF) { /* 20 fps -> 15 fps */ i2c_w_mask(sd, 0x2a, 0x80, 0x80); i2c_w(sd, 0x2b, 0xa8); } else { /* no framerate adj. 
*/ i2c_w_mask(sd, 0x2a, 0x00, 0x80); } break; } } } static void setfreq(struct gspca_dev *gspca_dev, s32 val) { struct sd *sd = (struct sd *) gspca_dev; setfreq_i(sd, val); /* Ugly but necessary */ if (sd->bridge == BRIDGE_W9968CF) w9968cf_set_crop_window(sd); } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge != BRIDGE_W9968CF) return -ENOTTY; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual); jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT | V4L2_JPEG_MARKER_DRI; return 0; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, const struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; if (sd->bridge != BRIDGE_W9968CF) return -ENOTTY; v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality); return 0; } static int sd_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; switch (ctrl->id) { case V4L2_CID_AUTOGAIN: gspca_dev->exposure->val = i2c_r(sd, 0x10); break; } return 0; } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: setbrightness(gspca_dev, ctrl->val); break; case V4L2_CID_CONTRAST: setcontrast(gspca_dev, ctrl->val); break; case V4L2_CID_POWER_LINE_FREQUENCY: setfreq(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOBRIGHTNESS: if (ctrl->is_new) setautobright(gspca_dev, ctrl->val); if (!ctrl->val && sd->brightness->is_new) setbrightness(gspca_dev, sd->brightness->val); break; case V4L2_CID_SATURATION: setcolors(gspca_dev, ctrl->val); break; case V4L2_CID_HFLIP: sethvflip(gspca_dev, ctrl->val, sd->vflip->val); break; case V4L2_CID_AUTOGAIN: if (ctrl->is_new) setautogain(gspca_dev, ctrl->val); if (!ctrl->val && gspca_dev->exposure->is_new) setexposure(gspca_dev, gspca_dev->exposure->val); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: return -EBUSY; /* Should never happen, as we grab the ctrl */ } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .g_volatile_ctrl = sd_g_volatile_ctrl, .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 10); if (valid_controls[sd->sensor].has_brightness) sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, sd->sensor == SEN_OV7660 ? 6 : 255, 1, sd->sensor == SEN_OV7660 ? 3 : 127); if (valid_controls[sd->sensor].has_contrast) { if (sd->sensor == SEN_OV7660) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 6, 1, 3); else v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, (sd->sensor == SEN_OV6630 || sd->sensor == SEN_OV66308AF) ? 200 : 127); } if (valid_controls[sd->sensor].has_sat) v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_SATURATION, 0, sd->sensor == SEN_OV7660 ? 4 : 255, 1, sd->sensor == SEN_OV7660 ? 
2 : 127); if (valid_controls[sd->sensor].has_exposure) gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 0, 255, 1, 127); if (valid_controls[sd->sensor].has_hvflip) { sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); } if (valid_controls[sd->sensor].has_autobright) sd->autobright = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOBRIGHTNESS, 0, 1, 1, 1); if (valid_controls[sd->sensor].has_autogain) gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); if (valid_controls[sd->sensor].has_freq) { if (sd->sensor == SEN_OV7670) sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0, V4L2_CID_POWER_LINE_FREQUENCY_AUTO); else sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0); } if (sd->bridge == BRIDGE_W9968CF) sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF); if (hdl->error) { gspca_err(gspca_dev, "Could not initialize controls\n"); return hdl->error; } if (gspca_dev->autogain) v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, true); if (sd->autobright) v4l2_ctrl_auto_cluster(2, &sd->autobright, 0, false); if (sd->hflip) v4l2_ctrl_cluster(2, &sd->hflip); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = sd_reset_snapshot, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, #if IS_ENABLED(CONFIG_INPUT) .other_input = 1, #endif }; /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF }, {USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, {USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x045e, 0x028c), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, {USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 }, {USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 }, {USB_DEVICE(0x05a9, 0x0519), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, {USB_DEVICE(0x05a9, 0x0530), .driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED }, {USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 }, {USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 }, {USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS }, {USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS }, {USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS }, {USB_DEVICE(0x0b62, 0x0059), .driver_info = BRIDGE_OVFX2 }, {USB_DEVICE(0x0e96, 0xc001), .driver_info = BRIDGE_OVFX2 }, {USB_DEVICE(0x1046, 0x9967), .driver_info = BRIDGE_W9968CF }, {USB_DEVICE(0x8020, 0xef04), .driver_info 
= BRIDGE_OVFX2 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(frame_rate, int, 0644); MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
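/*
 * The ov519_pkt_scan() handler above classifies each isochronous packet by
 * its 16-byte header: three 0xff magic bytes, then 0x50 for start-of-frame
 * or 0x51 for end-of-frame; anything else is image data. Below is a minimal
 * standalone sketch of that classification in plain userspace C -- the enum
 * and helper names are illustrative only, not part of the driver.
 */
#include <stdint.h>
#include <stddef.h>

enum ov519_pkt_kind { OV519_PKT_SOF, OV519_PKT_EOF, OV519_PKT_DATA };

static enum ov519_pkt_kind ov519_classify(const uint8_t *data, size_t len)
{
	/* Control packets carry the 0xff 0xff 0xff magic in bytes 0-2. */
	if (len >= 16 && data[0] == 0xff && data[1] == 0xff && data[2] == 0xff) {
		if (data[3] == 0x50)	/* SOF: JPEG data follows the header */
			return OV519_PKT_SOF;
		if (data[3] == 0x51)	/* EOF: byte 9 flags an empty frame */
			return OV519_PKT_EOF;
	}
	return OV519_PKT_DATA;		/* intermediate image data */
}

int main(void)
{
	/* A start-of-frame control packet: magic plus type byte 0x50. */
	const uint8_t sof[16] = { 0xff, 0xff, 0xff, 0x50 };

	return ov519_classify(sof, sizeof(sof)) == OV519_PKT_SOF ? 0 : 1;
}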
// SPDX-License-Identifier: GPL-2.0-only /* * unicode.c * * PURPOSE * Routines for converting between UTF-8 and OSTA Compressed Unicode. * Also handles filename mangling * * DESCRIPTION * OSTA Compressed Unicode is explained in the OSTA UDF specification. * http://www.osta.org/ * UTF-8 is explained in the IETF RFC XXXX. * ftp://ftp.internic.net/rfc/rfcxxxx.txt * */ #include "udfdecl.h" #include <linux/kernel.h> #include <linux/string.h> /* for memset */ #include <linux/nls.h> #include <linux/crc-itu-t.h> #include <linux/slab.h> #include "udf_sb.h" #define PLANE_SIZE 0x10000 #define UNICODE_MAX 0x10ffff #define SURROGATE_MASK 0xfffff800 #define SURROGATE_PAIR 0x0000d800 #define SURROGATE_LOW 0x00000400 #define SURROGATE_CHAR_BITS 10 #define SURROGATE_CHAR_MASK ((1 << SURROGATE_CHAR_BITS) - 1) #define ILLEGAL_CHAR_MARK '_' #define EXT_MARK '.' #define CRC_MARK '#' #define EXT_SIZE 5 /* Number of chars we need to store generated CRC to make filename unique */ #define CRC_LEN 5 static unicode_t get_utf16_char(const uint8_t *str_i, int str_i_max_len, int str_i_idx, int u_ch, unicode_t *ret) { unicode_t c; int start_idx = str_i_idx; /* Expand OSTA compressed Unicode to Unicode */ c = str_i[str_i_idx++]; if (u_ch > 1) c = (c << 8) | str_i[str_i_idx++]; if ((c & SURROGATE_MASK) == SURROGATE_PAIR) { unicode_t next; /* Trailing surrogate char */ if (str_i_idx >= str_i_max_len) { c = UNICODE_MAX + 1; goto out; } /* Low surrogate must follow the high one...
*/ if (c & SURROGATE_LOW) { c = UNICODE_MAX + 1; goto out; } WARN_ON_ONCE(u_ch != 2); next = str_i[str_i_idx++] << 8; next |= str_i[str_i_idx++]; if ((next & SURROGATE_MASK) != SURROGATE_PAIR || !(next & SURROGATE_LOW)) { c = UNICODE_MAX + 1; goto out; } c = PLANE_SIZE + ((c & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) + (next & SURROGATE_CHAR_MASK); } out: *ret = c; return str_i_idx - start_idx; } static int udf_name_conv_char(uint8_t *str_o, int str_o_max_len, int *str_o_idx, const uint8_t *str_i, int str_i_max_len, int *str_i_idx, int u_ch, int *needsCRC, int (*conv_f)(wchar_t, unsigned char *, int), int translate) { unicode_t c; int illChar = 0; int len, gotch = 0; while (!gotch && *str_i_idx < str_i_max_len) { if (*str_o_idx >= str_o_max_len) { *needsCRC = 1; return gotch; } len = get_utf16_char(str_i, str_i_max_len, *str_i_idx, u_ch, &c); /* These chars cannot be converted. Replace them. */ if (c == 0 || c > UNICODE_MAX || (conv_f && c > MAX_WCHAR_T) || (translate && c == '/')) { illChar = 1; if (!translate) gotch = 1; } else if (illChar) break; else gotch = 1; *str_i_idx += len; } if (illChar) { *needsCRC = 1; c = ILLEGAL_CHAR_MARK; gotch = 1; } if (gotch) { if (conv_f) { len = conv_f(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); } else { len = utf32_to_utf8(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); if (len < 0) len = -ENAMETOOLONG; } /* Valid character? */ if (len >= 0) *str_o_idx += len; else if (len == -ENAMETOOLONG) { *needsCRC = 1; gotch = 0; } else { str_o[(*str_o_idx)++] = ILLEGAL_CHAR_MARK; *needsCRC = 1; } } return gotch; } static int udf_name_from_CS0(struct super_block *sb, uint8_t *str_o, int str_max_len, const uint8_t *ocu, int ocu_len, int translate) { uint32_t c; uint8_t cmp_id; int idx, len; int u_ch; int needsCRC = 0; int ext_i_len, ext_max_len; int str_o_len = 0; /* Length of resulting output */ int ext_o_len = 0; /* Extension output length */ int ext_crc_len = 0; /* Extension output length if used with CRC */ int i_ext = -1; /* Extension position in input buffer */ int o_crc = 0; /* Rightmost possible output pos for CRC+ext */ unsigned short valueCRC; uint8_t ext[EXT_SIZE * NLS_MAX_CHARSET_SIZE + 1]; uint8_t crc[CRC_LEN]; int (*conv_f)(wchar_t, unsigned char *, int); if (str_max_len <= 0) return 0; if (ocu_len == 0) { memset(str_o, 0, str_max_len); return 0; } if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->uni2char; else conv_f = NULL; cmp_id = ocu[0]; if (cmp_id != 8 && cmp_id != 16) { memset(str_o, 0, str_max_len); pr_err("unknown compression code (%u)\n", cmp_id); return -EINVAL; } u_ch = cmp_id >> 3; ocu++; ocu_len--; if (ocu_len % u_ch) { pr_err("incorrect filename length (%d)\n", ocu_len + 1); return -EINVAL; } if (translate) { /* Look for extension */ for (idx = ocu_len - u_ch, ext_i_len = 0; (idx >= 0) && (ext_i_len < EXT_SIZE); idx -= u_ch, ext_i_len++) { c = ocu[idx]; if (u_ch > 1) c = (c << 8) | ocu[idx + 1]; if (c == EXT_MARK) { if (ext_i_len) i_ext = idx; break; } } if (i_ext >= 0) { /* Convert extension */ ext_max_len = min_t(int, sizeof(ext), str_max_len); ext[ext_o_len++] = EXT_MARK; idx = i_ext + u_ch; while (udf_name_conv_char(ext, ext_max_len, &ext_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) { if ((ext_o_len + CRC_LEN) < str_max_len) ext_crc_len = ext_o_len; } } } idx = 0; while (1) { if (translate && (idx == i_ext)) { if (str_o_len > (str_max_len - ext_o_len)) needsCRC = 1; break; } if (!udf_name_conv_char(str_o, str_max_len, &str_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) 
break; if (translate && (str_o_len <= (str_max_len - ext_o_len - CRC_LEN))) o_crc = str_o_len; } if (translate) { if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' && (str_o_len == 1 || str_o[1] == '.')) needsCRC = 1; if (needsCRC) { str_o_len = o_crc; valueCRC = crc_itu_t(0, ocu, ocu_len); crc[0] = CRC_MARK; crc[1] = hex_asc_upper_hi(valueCRC >> 8); crc[2] = hex_asc_upper_lo(valueCRC >> 8); crc[3] = hex_asc_upper_hi(valueCRC); crc[4] = hex_asc_upper_lo(valueCRC); len = min_t(int, CRC_LEN, str_max_len - str_o_len); memcpy(&str_o[str_o_len], crc, len); str_o_len += len; ext_o_len = ext_crc_len; } if (ext_o_len > 0) { memcpy(&str_o[str_o_len], ext, ext_o_len); str_o_len += ext_o_len; } } return str_o_len; } static int udf_name_to_CS0(struct super_block *sb, uint8_t *ocu, int ocu_max_len, const uint8_t *str_i, int str_len) { int i, len; unsigned int max_val; int u_len, u_ch; unicode_t uni_char; int (*conv_f)(const unsigned char *, int, wchar_t *); if (ocu_max_len <= 0) return 0; if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->char2uni; else conv_f = NULL; memset(ocu, 0, ocu_max_len); ocu[0] = 8; max_val = 0xff; u_ch = 1; try_again: u_len = 1; for (i = 0; i < str_len; i += len) { /* Name didn't fit? */ if (u_len + u_ch > ocu_max_len) return 0; if (conv_f) { wchar_t wchar; len = conv_f(&str_i[i], str_len - i, &wchar); if (len > 0) uni_char = wchar; } else { len = utf8_to_utf32(&str_i[i], str_len - i, &uni_char); } /* Invalid character, deal with it */ if (len <= 0 || uni_char > UNICODE_MAX) { len = 1; uni_char = '?'; } if (uni_char > max_val) { unicode_t c; if (max_val == 0xff) { max_val = 0xffff; ocu[0] = 0x10; u_ch = 2; goto try_again; } /* * Use UTF-16 encoding for chars outside we * cannot encode directly. */ if (u_len + 2 * u_ch > ocu_max_len) return 0; uni_char -= PLANE_SIZE; c = SURROGATE_PAIR | ((uni_char >> SURROGATE_CHAR_BITS) & SURROGATE_CHAR_MASK); ocu[u_len++] = (uint8_t)(c >> 8); ocu[u_len++] = (uint8_t)(c & 0xff); uni_char = SURROGATE_PAIR | SURROGATE_LOW | (uni_char & SURROGATE_CHAR_MASK); } if (max_val == 0xffff) ocu[u_len++] = (uint8_t)(uni_char >> 8); ocu[u_len++] = (uint8_t)(uni_char & 0xff); } return u_len; } /* * Convert CS0 dstring to output charset. Warning: This function may truncate * input string if it is too long as it is used for informational strings only * and it is better to truncate the string than to refuse mounting a media. */ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) { int s_len = 0; if (i_len > 0) { s_len = ocu_i[i_len - 1]; if (s_len >= i_len) { pr_warn("incorrect dstring lengths (%d/%d)," " truncating\n", s_len, i_len); s_len = i_len - 1; /* 2-byte encoding? Need to round properly... */ if (ocu_i[0] == 16) s_len -= (s_len - 1) & 2; } } return udf_name_from_CS0(sb, utf_o, o_len, ocu_i, s_len, 0); } int udf_get_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { int ret; if (!slen) return -EIO; if (dlen <= 0) return 0; ret = udf_name_from_CS0(sb, dname, dlen, sname, slen, 1); /* Zero length filename isn't valid... */ if (ret == 0) ret = -EINVAL; return ret; } int udf_put_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { return udf_name_to_CS0(sb, dname, dlen, sname, slen); }
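/*
 * The surrogate handling above is symmetric: udf_name_to_CS0() splits a
 * code point above the Basic Multilingual Plane into a high/low UTF-16
 * surrogate pair, and get_utf16_char() reassembles it. Below is a minimal
 * self-contained round-trip sketch of the same arithmetic; the constants
 * mirror the PLANE_SIZE/SURROGATE_* defines above, and the helper names
 * are illustrative only, not part of the filesystem code.
 */
#include <stdint.h>
#include <assert.h>

#define PLANE_SIZE          0x10000
#define SURROGATE_PAIR      0x0000d800
#define SURROGATE_LOW       0x00000400
#define SURROGATE_CHAR_BITS 10
#define SURROGATE_CHAR_MASK ((1 << SURROGATE_CHAR_BITS) - 1)

/* Split a supplementary-plane code point into a UTF-16 surrogate pair,
 * the way udf_name_to_CS0() emits it into the CS0 output buffer. */
static void split_surrogate(uint32_t c, uint16_t *hi, uint16_t *lo)
{
	c -= PLANE_SIZE;	/* offset into the supplementary planes */
	*hi = SURROGATE_PAIR | ((c >> SURROGATE_CHAR_BITS) & SURROGATE_CHAR_MASK);
	*lo = SURROGATE_PAIR | SURROGATE_LOW | (c & SURROGATE_CHAR_MASK);
}

/* Reassemble the pair, as get_utf16_char() does on the decode path. */
static uint32_t join_surrogate(uint16_t hi, uint16_t lo)
{
	return PLANE_SIZE +
	       (((uint32_t)hi & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) +
	       (lo & SURROGATE_CHAR_MASK);
}

int main(void)
{
	uint16_t hi, lo;

	/* U+1F600 round-trips as the well-known pair 0xd83d/0xde00. */
	split_surrogate(0x1f600, &hi, &lo);
	assert(hi == 0xd83d && lo == 0xde00);
	assert(join_surrogate(hi, lo) == 0x1f600);
	return 0;
}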
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved.
*/ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_mount.h" #include "xfs_defer.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_log.h" #include "xfs_log_priv.h" #include "xfs_log_recover.h" #include "xfs_trans_priv.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_error.h" #include "xfs_buf_item.h" #include "xfs_ag.h" #include "xfs_quota.h" #include "xfs_reflink.h" #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) STATIC int xlog_find_zeroed( struct xlog *, xfs_daddr_t *); STATIC int xlog_clear_stale_blocks( struct xlog *, xfs_lsn_t); STATIC int xlog_do_recovery_pass( struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *); /* * Sector aligned buffer routines for buffer create/read/write/access */ /* * Verify the log-relative block number and length in basic blocks are valid for * an operation involving the given XFS log buffer. Returns true if the fields * are valid, false otherwise. */ static inline bool xlog_verify_bno( struct xlog *log, xfs_daddr_t blk_no, int bbcount) { if (blk_no < 0 || blk_no >= log->l_logBBsize) return false; if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) return false; return true; } /* * Allocate a buffer to hold log data. The buffer needs to be able to map to * a range of nbblks basic blocks at any valid offset within the log. */ static char * xlog_alloc_buffer( struct xlog *log, int nbblks) { /* * Pass log block 0 since we don't have an addr yet, buffer will be * verified on read. */ if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) { xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", nbblks); return NULL; } /* * We do log I/O in units of log sectors (a power-of-2 multiple of the * basic block size), so we round up the requested size to accommodate * the basic blocks required for complete log sectors. * * In addition, the buffer may be used for a non-sector-aligned block * offset, in which case an I/O of the requested size could extend * beyond the end of the buffer. If the requested size is only 1 basic * block it will never straddle a sector boundary, so this won't be an * issue. Nor will this be a problem if the log I/O is done in basic * blocks (sector size 1). But otherwise we extend the buffer by one * extra log sector to ensure there's space to accommodate this * possibility. */ if (nbblks > 1 && log->l_sectBBsize > 1) nbblks += log->l_sectBBsize; nbblks = round_up(nbblks, log->l_sectBBsize); return kvzalloc(BBTOB(nbblks), GFP_KERNEL | __GFP_RETRY_MAYFAIL); } /* * Return the address of the start of the given block number's data * in a log buffer. The buffer covers a log sector-aligned region. 
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}

static int
xlog_do_io(
	struct xlog		*log,
	xfs_daddr_t		blk_no,
	unsigned int		nbblks,
	char			*data,
	enum req_op		op)
{
	int			error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !xlog_is_shutdown(log)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}

STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, " log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
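 *
 * The loop below maintains the invariant that the block at first_blk does
 * not carry the target cycle number while the block at end_blk does, so
 * when the two converge to adjacent blocks, end_blk is (approximately) the
 * first block stamped with the given cycle.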
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining. If that fails,
	 * try a smaller size. We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kvfree(buffer);
	return error;
}

static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_has_logv2(log->l_mp)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record. Therefore, we subtract one to get the block number
 * of the last block in the given buffer. extra_bblks contains the number
 * of blocks we would have read on a previous read. This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
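 *
 * For example, if a previous call had already verified two blocks at the
 * physical end of the log before the search wrapped, extra_bblks is 2 here
 * so the record length check still accounts for those blocks.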
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header. Return
	 * to caller. If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head). So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #. We may end
	 * up reading an entire log record. In this case, we don't want to
	 * reset last_blk. Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kvfree(buffer);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go. This means that incomplete LR writes at the end are
 * eliminated when calculating the head. We aren't guaranteed that previous
 * LRs have complete transactions. We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed?
		 */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number. In this
	 * case, head_blk can't be set to zero (which makes sense). The below
	 * math doesn't work out properly with head_blk equal to zero. Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct. If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum. In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle. We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1. If we find such a hole,
		 * then the start of that hole will be the new head. The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle. We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs. First we do a binary search
		 * for the first occurrence of last_half_cycle. The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us. If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log. The cases we're looking for look
		 * like
		 *        v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk,
				&head_blk, last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer. Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.
	 * The maximum is determined by the total possible amount of buffering
	 * in the in-core log. The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log. In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log. The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle. If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found. This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks. We need to skip past those since that is
		 * certainly not the head of the log. By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now. The last part of the physical
		 * log is good. This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
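	 *
	 * XLOG_REC_SHIFT() bounds the size of a single log record in basic
	 * blocks, so scanning that far back from the candidate head is
	 * sufficient to find the header of any record that could contain it.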
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kvfree(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number. Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1. In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kvfree(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
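 *
 * This is the forward-walking counterpart of xlog_rseek_logrec_hdr() above;
 * both return the number of headers found, which may be fewer than count,
 * so callers must treat a return of 0 as "no header located" rather than
 * as an error.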
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
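	 *
	 * For example, in a 1000 block log with the head at block 100, a bad
	 * record at block 40 is xlog_tail_distance() == 40 + (1000 - 100) ==
	 * 940 blocks behind the head; with 32k iclogs that is well beyond
	 * BTOBB(XLOG_MAX_ICLOGS * hsize) == 512 blocks, so the loop below
	 * gives up rather than walk the tail past it.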
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kvfree(buffer);
	return error;
}

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kvfree(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since 'i' could be the last block in the physical
	 * log, we convert to a log block before comparing to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
	hblks = xlog_logrec_hblks(log, rhead);
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			log->l_ailp->ail_head_lsn =
					atomic64_read(&log->l_tail_lsn);
			*tail_blk = after_umount_blk;
			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
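	 *
	 * For example, if the last good record ended exactly at the physical
	 * end of the log, head_blk is 0 and the record header search wrapped,
	 * so bump_cycle is set and the next write at block 0 is stamped with
	 * the next cycle number.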
	 */
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	log->l_ailp->ail_head_lsn = be64_to_cpu(rhead->h_lsn);
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Note that the unmount was clean.
	 * If the unmount was not clean, we
	 * need to know this to rebuild the superblock counters from the perag
	 * headers if we have a filesystem using non-persistent counters.
	 */
	if (clean)
		xfs_set_clean(log->l_mp);

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kvfree(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough. You can then search linearly through
 * the X blocks. This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0. It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;
	int		ret = 1;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;
	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		goto out_free_buffer;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;
	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		ret = 0;
		goto out_free_buffer;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer. Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks. At this point, the maximum
	 * is not chosen to mean anything special. XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head. What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.
	 * We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kvfree(buffer);
	if (error)
		return error;
	return ret;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_has_logv2(log->l_mp) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written. If that fails, try
	 * a smaller size. We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = roundup_pow_of_two(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;
		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kvfree(buffer);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head. We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number. We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
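 *
 * For example, with the head at block 100 of cycle 5, any blocks beyond
 * block 100 that a torn checkpoint stamped with cycle 5 are rewritten as
 * empty record headers carrying cycle 4, so a future head search will not
 * mistake them for valid log data.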
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log. Doing so would leave the log without
 * any valid log records in it until a new one was written. If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail. We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log. Just do it
		 * in a single write. Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks. Do it in two separate
		 * I/Os. The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog			*log,
	unsigned short			intent_type,
	uint64_t			intent_id)
{
	struct xfs_defer_pending	*dfp, *n;

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		struct xfs_log_item	*lip = dfp->dfp_intent;

		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		ASSERT(xlog_item_is_intent(lip));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}

int
xlog_recover_iget(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp)
{
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, 0, ipp);
	if (error)
		return error;

	error = xfs_qm_dqattach(*ipp);
	if (error) {
		xfs_irele(*ipp);
		return error;
	}

	if (VFS_I(*ipp)->i_nlink == 0)
		xfs_iflags_set(*ipp, XFS_IRECOVERY);

	return 0;
}

/*
 * Get an inode so that we can recover a log operation.
 *
 * Log intent items that target inodes effectively contain a file handle.
 * Check that the generation number matches the intent item like we do for
 * other file handles. Log intent items defined after this validation weakness
 * was identified must use this function.
 */
int
xlog_recover_iget_handle(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	uint32_t		gen,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	int			error;

	error = xlog_recover_iget(mp, ino, &ip);
	if (error)
		return error;

	if (VFS_I(ip)->i_generation != gen) {
		xfs_irele(ip);
		return -EFSCORRUPTED;
	}

	*ipp = ip;
	return 0;
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
	&xlog_attri_item_ops,
	&xlog_attrd_item_ops,
	&xlog_xmi_item_ops,
	&xlog_xmd_item_ops,
	&xlog_rtefi_item_ops,
	&xlog_rtefd_item_ops,
};

static const struct xlog_recover_item_ops *
xlog_find_item_ops(
	struct xlog_recover_item	*item)
{
	unsigned int			i;

	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
			return xlog_recover_item_ops[i];

	return NULL;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	struct xlog_recover_item *item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(item_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;

		item->ri_ops = xlog_find_item_ops(item);
		if (!item->ri_ops) {
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation (%d)",
				__func__, ITEM_TYPE(item));
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EFSCORRUPTED;
			break;
		}

		if (item->ri_ops->reorder)
			fate = item->ri_ops->reorder(item);

		switch (fate) {
		case XLOG_REORDER_BUFFER_LIST:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XLOG_REORDER_CANCEL_LIST:
			trace_xfs_log_recover_item_reorder_head(log,
					trans, item, pass);
			list_move(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move(&item->ri_list, &inode_buffer_list);
			break;
		case XLOG_REORDER_ITEM_LIST:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &item_list);
			break;
		}
	}

	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&item_list))
		list_splice_tail(&item_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}

void
xlog_buf_readahead(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	const struct xfs_buf_ops *ops)
{
	if (!xlog_is_buffer_cancelled(log, blkno, len))
		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
}

/*
 * Create a deferred work structure for resuming and tracking the progress of a
 * log intent item that was found during recovery.
 */
void
xlog_recover_intent_item(
	struct xlog			*log,
	struct xfs_log_item		*lip,
	xfs_lsn_t			lsn,
	const struct xfs_defer_op_type	*ops)
{
	ASSERT(xlog_item_is_intent(lip));

	xfs_defer_start_recovery(lip, &log->r_dfops, ops);

	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, lip, lsn);
	lip->li_ops->iop_unpin(lip, 0);
}

STATIC int
xlog_recover_items_pass2(
	struct xlog		*log,
	struct xlog_recover	*trans,
	struct list_head	*buffer_list,
	struct list_head	*item_list)
{
	struct xlog_recover_item *item;
	int			error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item,
				XLOG_RECOVER_PASS2);

		if (item->ri_ops->commit_pass2)
			error = item->ri_ops->commit_pass2(log, buffer_list,
					item, trans->r_lsn);
		if (error)
			return error;
	}

	return error;
}

/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now. Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(ra_list);
	LIST_HEAD		(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del_init(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item, pass);

		switch (pass) {
		case XLOG_RECOVER_PASS1:
			if (item->ri_ops->commit_pass1)
				error = item->ri_ops->commit_pass1(log, item);
			break;
		case XLOG_RECOVER_PASS2:
			if (item->ri_ops->ra_pass2)
				item->ri_ops->ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	struct xlog_recover_item *item;

	item = kzalloc(sizeof(struct xlog_recover_item),
			GFP_KERNEL | __GFP_NOFAIL);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xlog_recover_item *item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
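	 *
	 * For example, if the previous record carried the first 10 bytes of
	 * the 16 byte struct xfs_trans_header, len is 6 here and the copy
	 * below lands in the last 6 bytes of trans->r_theader.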
	 */
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kvrealloc(old_ptr, len + old_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region. It could be
 * a whole region or it could be the first part of a new region. Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned. Therefore, we
 * either have both fields or we have neither field. In the case where we
 * have neither field, the data part of the region is zero length. We only
 * have a log_op_header and can throw away the header since a new one will
 * appear later. If we have at least 4 bytes, then we can determine how many
 * regions will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format	*in_f;	/* any will do */
	struct xlog_recover_item *item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
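		 * (A continuation record is stitched back on via
		 * xlog_recover_add_to_cont_trans() above.)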
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = xlog_kvmalloc(len);
	memcpy(ptr, dp, len);
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					struct xlog_recover_item, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kvfree(ptr);
			return -EFSCORRUPTED;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf = kzalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				GFP_KERNEL | __GFP_NOFAIL);
	}

	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
			"log item region count (%d) overflowed size (%d)",
				item->ri_cnt, item->ri_total);
		ASSERT(0);
		kvfree(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	struct xlog_recover_item *item, *n;
	int			i;

	hlist_del_init(&trans->r_list);

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kvfree(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kfree(item->ri_buf);
		kfree(item);
	}
	/* Free the transaction recover structure */
	kfree(trans);
}

/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EFSCORRUPTED;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}

/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
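 *
 * Transaction IDs are hashed (XLOG_RHASH()) so that multiple transactions
 * interleaved in the same log record can be reassembled independently, each
 * in its own bucket chain.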
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kzalloc(sizeof(struct xlog_recover), GFP_KERNEL | __GFP_NOFAIL);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}

STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	char			*dp,
	char			*end,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_recover	*trans;
	unsigned int		len;
	int			error;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	/*
	 * The recovered buffer queue is drained only once we know that all
	 * recovery items for the current LSN have been processed. This is
	 * required because:
	 *
	 * - Buffer write submission updates the metadata LSN of the buffer.
	 * - Log recovery skips items with a metadata LSN >= the current LSN of
	 *   the recovery item.
	 * - Separate recovery items against the same metadata buffer can share
	 *   a current LSN. I.e., consider that the LSN of a recovery item is
	 *   defined as the starting LSN of the first record in which its
	 *   transaction appears, that a record can hold multiple transactions,
	 *   and/or that a transaction can span multiple records.
	 *
	 * In other words, we are allowed to submit a buffer from log recovery
	 * once per current LSN. Otherwise, we may incorrectly skip recovery
	 * items and cause corruption.
	 *
	 * We don't know up front whether buffers are updated multiple times
	 * per LSN. Therefore, track the current LSN of each commit log record
	 * as it is processed and drain the queue when it changes. Use commit
	 * records because they are ordered correctly by the logging code.
	 */
	if (log->l_recovery_lsn != trans->r_lsn &&
	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
		error = xfs_buf_delwri_submit(buffer_list);
		if (error)
			return error;
		log->l_recovery_lsn = trans->r_lsn;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass, buffer_list);
}

/*
 * There are two valid states of the r_state field. 0 indicates that the
 * transaction structure is in a normal state. We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation.
 * If the last operation we added to the transaction was a partial operation,
 * we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	struct xlog_op_header	*ohead;
	char			*end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	trace_xfs_log_recover_record(log, rhead, pass);
	while ((dp < end) && num_logops) {

		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		if (dp > end) {
			xfs_warn(log->l_mp, "%s: op header overrun", __func__);
			return -EFSCORRUPTED;
		}

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass, buffer_list);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}

/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
	struct xfs_mount	*mp,
	struct list_head	*capture_list)
{
	struct xfs_defer_capture *dfc, *next;
	struct xfs_trans	*tp;
	int			error = 0;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		struct xfs_trans_res	resv;
		struct xfs_defer_resources dres;

		/*
		 * Create a new transaction reservation from the captured
		 * information. Set logcount to 1 to force the new transaction
		 * to regrant every roll so that we can make forward progress
		 * in recovery no matter how full the log might be.
		 */
		resv.tr_logres = dfc->dfc_logres;
		resv.tr_logcount = 1;
		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;

		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
		if (error) {
			xlog_force_shutdown(mp->m_log, SHUTDOWN_LOG_IO_ERROR);
			return error;
		}

		/*
		 * Transfer to this new transaction all the dfops we captured
		 * from recovering a single intent item.
		 */
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_continue(dfc, tp, &dres);
		error = xfs_trans_commit(tp);
		xfs_defer_resources_rele(&dres);
		if (error)
			return error;
	}

	ASSERT(list_empty(capture_list));
	return 0;
}

/* Release all the captured defer ops and capture structures in this list. */
static void
xlog_abort_defer_ops(
	struct xfs_mount		*mp,
	struct list_head		*capture_list)
{
	struct xfs_defer_capture	*dfc;
	struct xfs_defer_capture	*next;

	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_capture_abort(mp, dfc);
	}
}

/*
 * When this is called, all of the log intent items which did not have
 * corresponding log done items should be in the AIL. What we do now is update
 * the data structures associated with each one.
 *
 * Since we process the log intent items in normal transactions, they will be
 * removed at some point after the commit. This prevents us from just walking
 * down the list processing each one. We'll use a flag in the intent item to
 * skip those that we've already processed and use the AIL iteration
 * mechanism's generation count to try to speed this up at least a bit.
 *
 * When we start, we know that the intents are the only things in the AIL. As
 * we process them, however, other items are added to the AIL. Hence we know we
 * have started recovery on all the pending intents when we find a non-intent
 * item in the AIL.
 */
STATIC int
xlog_recover_process_intents(
	struct xlog		*log)
{
	LIST_HEAD(capture_list);
	struct xfs_defer_pending *dfp, *n;
	int			error = 0;
#if defined(DEBUG) || defined(XFS_WARN)
	xfs_lsn_t		last_lsn;

	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
#endif

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		ASSERT(xlog_item_is_intent(dfp->dfp_intent));

		/*
		 * We should never see a redo item with an LSN higher than
		 * the last transaction we found in the log at the start
		 * of recovery.
		 */
		ASSERT(XFS_LSN_CMP(last_lsn, dfp->dfp_intent->li_lsn) >= 0);

		/*
		 * NOTE: If your intent processing routine can create more
		 * deferred ops, you /must/ attach them to the capture list in
		 * the recover routine or else those subsequent intents will be
		 * replayed in the wrong order!
		 *
		 * The recovery function can free the log item, so we must not
		 * access dfp->dfp_intent after it returns. It must dispose of
		 * @dfp if it returns 0.
		 */
		error = xfs_defer_finish_recovery(log->l_mp, dfp,
				&capture_list);
		if (error)
			break;
	}
	if (error)
		goto err;

	error = xlog_finish_defer_ops(log->l_mp, &capture_list);
	if (error)
		goto err;

	return 0;
err:
	xlog_abort_defer_ops(log->l_mp, &capture_list);
	return error;
}

/*
 * A cancel occurs when the mount has failed and we're bailing out. Release all
 * pending log intent items that we haven't started recovery on so they don't
 * pin the AIL.
 */
STATIC void
xlog_recover_cancel_intents(
	struct xlog		*log)
{
	struct xfs_defer_pending *dfp, *n;

	list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
		ASSERT(xlog_item_is_intent(dfp->dfp_intent));

		xfs_defer_cancel_recovery(log->l_mp, dfp);
	}
}

/*
 * Transfer ownership of the recovered pending work to the recovery transaction
 * and try to finish the work. If there is more work to be done, the dfp will
 * remain attached to the transaction. If not, the dfp is freed.
 */
int
xlog_recover_finish_intent(
	struct xfs_trans	*tp,
	struct xfs_defer_pending *dfp)
{
	int			error;

	list_move(&dfp->dfp_list, &tp->t_dfops);
	error = xfs_defer_finish_one(tp, dfp);
	if (error == -EAGAIN)
		return 0;
	return error;
}

/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	struct xfs_perag	*pag,
	int			bucket)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_trans	*tp;
	struct xfs_agi		*agi;
	struct xfs_buf		*agibp;
	int			offset;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	error = xfs_read_agi(pag, tp, 0, &agibp);
	if (error)
		goto out_abort;

	agi = agibp->b_addr;
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__,
			pag_agno(pag));
Continuing.", __func__, pag_agno(pag)); return; } static int xlog_recover_iunlink_bucket( struct xfs_perag *pag, struct xfs_agi *agi, int bucket) { struct xfs_mount *mp = pag_mount(pag); struct xfs_inode *prev_ip = NULL; struct xfs_inode *ip; xfs_agino_t prev_agino, agino; int error = 0; agino = be32_to_cpu(agi->agi_unlinked[bucket]); while (agino != NULLAGINO) { error = xfs_iget(mp, NULL, xfs_agino_to_ino(pag, agino), 0, 0, &ip); if (error) break; ASSERT(VFS_I(ip)->i_nlink == 0); ASSERT(VFS_I(ip)->i_mode != 0); xfs_iflags_clear(ip, XFS_IRECOVERY); agino = ip->i_next_unlinked; if (prev_ip) { ip->i_prev_unlinked = prev_agino; xfs_irele(prev_ip); /* * Ensure the inode is removed from the unlinked list * before we continue so that it won't race with * building the in-memory list here. This could be * serialised with the agibp lock, but that just * serialises via lockstepping and it's much simpler * just to flush the inodegc queue and wait for it to * complete. */ error = xfs_inodegc_flush(mp); if (error) break; } prev_agino = agino; prev_ip = ip; } if (prev_ip) { int error2; ip->i_prev_unlinked = prev_agino; xfs_irele(prev_ip); error2 = xfs_inodegc_flush(mp); if (error2 && !error) return error2; } return error; } /* * Recover AGI unlinked lists * * This is called during recovery to process any inodes which we unlinked but * not freed when the system crashed. These inodes will be on the lists in the * AGI blocks. What we do here is scan all the AGIs and fully truncate and free * any inodes found on the lists. Each inode is removed from the lists when it * has been fully truncated and is freed. The freeing of the inode and its * removal from the list must be atomic. * * If everything we touch in the agi processing loop is already in memory, this * loop can hold the cpu for a long time. It runs without lock contention, * memory allocation contention, the need wait for IO, etc, and so will run * until we either run out of inodes to process, run low on memory or we run out * of log space. * * This behaviour is bad for latency on single CPU and non-preemptible kernels, * and can prevent other filesystem work (such as CIL pushes) from running. This * can lead to deadlocks if the recovery process runs out of log reservation * space. Hence we need to yield the CPU when there is other kernel work * scheduled on this CPU to ensure other scheduled work can run without undue * latency. */ static void xlog_recover_iunlink_ag( struct xfs_perag *pag) { struct xfs_agi *agi; struct xfs_buf *agibp; int bucket; int error; error = xfs_read_agi(pag, NULL, 0, &agibp); if (error) { /* * AGI is b0rked. Don't process it. * * We should probably mark the filesystem as corrupt after we've * recovered all the ag's we can.... */ return; } /* * Unlock the buffer so that it can be acquired in the normal course of * the transaction to truncate and free each inode. Because we are not * racing with anyone else here for the AGI buffer, we don't even need * to hold it locked to read the initial unlinked bucket entries out of * the buffer. We keep buffer reference though, so that it stays pinned * in memory while we need the buffer. */ agi = agibp->b_addr; xfs_buf_unlock(agibp); for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { error = xlog_recover_iunlink_bucket(pag, agi, bucket); if (error) { /* * Bucket is unrecoverable, so only a repair scan can * free the remaining unlinked inodes. Just empty the * bucket and remaining inodes on it unreferenced and * unfreeable. 
			 */
			xlog_recover_clear_agi_bucket(pag, bucket);
		}
	}

	xfs_buf_rele(agibp);
}

static void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	struct xfs_perag	*pag = NULL;

	while ((pag = xfs_perag_next(log->l_mp, pag)))
		xlog_recover_iunlink_ag(pag);
}

STATIC void
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	struct xlog		*log)
{
	int			i, j, k;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_has_logv2(log->l_mp)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}
}

/*
 * CRC check, unpack and process a log record.
 */
STATIC int
xlog_recover_process(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			pass,
	struct list_head	*buffer_list)
{
	__le32			old_crc = rhead->h_crc;
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));

	/*
	 * Nothing else to do if this is a CRC verification pass. Just return
	 * if this is a record with a non-zero crc. Unfortunately, mkfs always
	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
	 * know precisely what failed.
	 */
	if (pass == XLOG_RECOVER_CRCPASS) {
		if (old_crc && crc != old_crc)
			return -EFSBADCRC;
		return 0;
	}

	/*
	 * We're in the normal recovery path. Issue a warning if and only if
	 * the CRC in the header is non-zero. This is an advisory warning and
	 * the zero CRC check prevents warnings from being emitted when
	 * upgrading the kernel from one that does not add CRCs by default.
	 */
	if (crc != old_crc) {
		if (old_crc || xfs_has_crc(log->l_mp)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(old_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If the filesystem is CRC enabled, this mismatch becomes a
		 * fatal log corruption failure.
		 */
		if (xfs_has_crc(log->l_mp)) {
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
	}

	xlog_unpack_data(rhead, dp, log);

	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
					 buffer_list);
}

STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno,
	int			bufsize)
{
	int			hlen;

	if (XFS_IS_CORRUPT(log->l_mp,
			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
		return -EFSCORRUPTED;
	if (XFS_IS_CORRUPT(log->l_mp,
			   (!rhead->h_version ||
			   (be32_to_cpu(rhead->h_version) &
			    (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EFSCORRUPTED;
	}

	/*
	 * LR body must have data (or it wouldn't have been written)
	 * and h_len must not be greater than LR buffer size.
	 */
	hlen = be32_to_cpu(rhead->h_len);
	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
		return -EFSCORRUPTED;

	if (XFS_IS_CORRUPT(log->l_mp,
			   blkno > log->l_logBBsize || blkno > INT_MAX))
		return -EFSCORRUPTED;
	return 0;
}

/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately. The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass,
	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no, rblk_no;
	xfs_daddr_t		rhead_blk;
	char			*offset;
	char			*hbp, *dbp;
	int			error = 0, h_size, h_len;
	int			error2 = 0;
	int			bblks, split_bblks;
	int			hblks = 1, split_hblks, wrapped_hblks;
	int			i;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];
	LIST_HEAD		(buffer_list);

	ASSERT(head_blk != tail_blk);
	blk_no = rhead_blk = tail_blk;

	for (i = 0; i < XLOG_RHASH_SIZE; i++)
		INIT_HLIST_HEAD(&rhash[i]);

	hbp = xlog_alloc_buffer(log, hblks);
	if (!hbp)
		return -ENOMEM;

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size. Use this to tell how many sectors make up the log header.
	 */
	if (xfs_has_logv2(log->l_mp)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it. Get a
		 * new hbp that is the correct size.
		 */
		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;

		/*
		 * xfsprogs has a bug where record length is based on lsunit
		 * but h_size (iclog size) is hardcoded to 32k. Now that we
		 * unconditionally CRC verify the unmount record, this means
		 * the log buffer can be too small for the record and cause an
		 * overrun.
		 *
		 * Detect this condition here. Use lsunit for the buffer size
		 * as long as this looks like the mkfs case. Otherwise, return
		 * an error to avoid a buffer overrun.
		 */
		h_size = be32_to_cpu(rhead->h_size);
		h_len = be32_to_cpu(rhead->h_len);
		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
		    rhead->h_num_logops == cpu_to_be32(1)) {
			xfs_warn(log->l_mp,
		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
				 h_size, log->l_mp->m_logbsize);
			h_size = log->l_mp->m_logbsize;
		}

		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
		if (error)
			goto bread_err1;

		/*
		 * This open codes xlog_logrec_hblks so that we can reuse the
		 * fixed up h_size value calculated above. Without that we'd
		 * still allocate the buffer based on the incorrect on-disk
		 * size.
		 */
		if (h_size > XLOG_HEADER_CYCLE_SIZE &&
		    (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
			hblks = DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
			if (hblks > 1) {
				kvfree(hbp);
				hbp = xlog_alloc_buffer(log, hblks);
				if (!hbp)
					return -ENOMEM;
			}
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
	if (!dbp) {
		kvfree(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize -
							(int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_noalign(log, 0,
						wrapped_hblks,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
					split_hblks ? blk_no : 0, h_size);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/*
			 * Read the log record data in multiple reads if it
			 * wraps around the end of the log. Note that if the
			 * header already wrapped, blk_no could point past the
			 * end of the log. The record data is contiguous in
			 * that case.
			 */
			if (blk_no + bblks <= log->l_logBBsize ||
			    blk_no >= log->l_logBBsize) {
				rblk_no = xlog_wrap_logbno(log, blk_no);
				error = xlog_bread(log, rblk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_noalign(log, 0,
						bblks - split_bblks,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_recover_process(log, rhash, rhead, offset,
						     pass, &buffer_list);
			if (error)
				goto bread_err2;

			blk_no += bblks;
			rhead_blk = blk_no;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
		rhead_blk = blk_no;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_recover_process(log, rhash, rhead, offset, pass,
					     &buffer_list);
		if (error)
			goto bread_err2;

		blk_no += bblks + hblks;
		rhead_blk = blk_no;
	}

 bread_err2:
	kvfree(dbp);
 bread_err1:
	kvfree(hbp);

	/*
	 * Submit buffers that have been dirtied by the last record recovered.
	 */
	if (!list_empty(&buffer_list)) {
		if (error) {
			/*
			 * If there has been an item recovery error then we
			 * cannot allow partial checkpoint writeback to
			 * occur. We might have multiple checkpoints with the
			 * same start LSN in this buffer list, and partial
			 * writeback of a checkpoint in this situation can
			 * prevent future recovery of all the changes in the
			 * checkpoints at this start LSN.
			 *
			 * Note: Shutting down the filesystem will result in
			 * the delwri submission marking all the buffers stale,
			 * completing them and cleaning up _XBF_LOGRECOVERY
			 * state without doing any IO.
			 */
			xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
		}
		error2 = xfs_buf_delwri_submit(&buffer_list);
	}

	if (error && first_bad)
		*first_bad = rhead_blk;

	/*
	 * Transactions are freed at commit time but transactions without
	 * commit records on disk are never committed. Free any that may be
	 * left in the hash table.
	 */
	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
		struct hlist_node	*tmp;
		struct xlog_recover	*trans;

		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
			xlog_recover_free_trans(trans);
	}

	return error ? error : error2;
}

/*
 * Do the recovery of the log. We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log. The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled. The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	error = xlog_alloc_buf_cancel_table(log);
	if (error)
		return error;

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1, NULL);
	if (error != 0)
		goto out_cancel;

	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2, NULL);
	if (!error)
		xlog_check_buf_cancel_table(log);
out_cancel:
	xlog_free_buf_cancel_table(log);
	return error;
}

/*
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_buf		*bp = mp->m_sb_bp;
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	trace_xfs_log_recover(log, head_blk, tail_blk);

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	if (xlog_is_shutdown(log))
		return -EIO;

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use. If there were no extent or
	 * iunlinks, we can free up the entire log. This was set in
	 * xlog_find_tail to be the lsn of the last known good LR on disk. If
	 * there are extent frees or iunlinks they will have some entries in
	 * the AIL; so we look at the AIL to determine how to set the tail_lsn.
	 */
	xfs_ail_assign_tail_lsn(log->l_ailp);

	/*
	 * Now that we've finished replaying all buffer and inode updates,
	 * re-read the superblock and reverify it.
	 */
	xfs_buf_lock(bp);
	xfs_buf_hold(bp);
	error = _xfs_buf_read(bp, XBF_READ);
	if (error) {
		if (!xlog_is_shutdown(log)) {
			xfs_buf_ioerror_alert(bp, __this_address);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	xfs_sb_from_disk(sbp, bp->b_addr);
	xfs_buf_relse(bp);

	/* re-initialise in-core superblock and geometry structures */
	mp->m_features |= xfs_sb_version_to_features(sbp);
	xfs_reinit_percpu_counters(mp);

	/* Normal transactions can now occur */
	clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
	return 0;
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	error = xlog_find_tail(log, &head_blk, &tail_blk);
	if (error)
		return error;

	/*
	 * The superblock was read before the log was available and thus the
	 * LSN could not be verified. Check the superblock LSN against the
	 * current LSN now that it's known.
	 */
	if (xfs_has_crc(log->l_mp) &&
	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
		return -EINVAL;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts. note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true. Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover. We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures
		 * with log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build up the
 * list of intents which need to be processed. Here we process the intents and
 * clean up the on disk unlinked inode lists. This is separated from the first
 * part of recovery so that the root and real-time bitmap inodes can be read in
 * from disk in between the two stages. This is necessary so that we can free
 * space in the real-time portion of the file system.
 *
 * We run this whole process under GFP_NOFS allocation context. We do a
 * combination of non-transactional and transactional work, yet we really don't
 * want to recurse into the filesystem from direct reclaim during any of this
 * processing. This allows all the recovery code run here not to care about the
 * memory allocation context it is running in.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	unsigned int	nofs_flags = memalloc_nofs_save();
	int		error;

	error = xlog_recover_process_intents(log);
	if (error) {
		/*
		 * Cancel all the unprocessed intent items now so that we don't
		 * leave them pinned in the AIL. This can cause the AIL to
		 * livelock on the pinned item if anyone tries to push the AIL
		 * (inode reclaim does this) before we get around to
		 * xfs_log_mount_cancel.
		 */
		xlog_recover_cancel_intents(log);
		xfs_alert(log->l_mp, "Failed to recover intents");
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
		goto out_error;
	}

	/*
	 * Sync the log to get all the intents out of the AIL. This isn't
	 * absolutely necessary, but it helps in case the unlink transactions
	 * would have problems pushing the intents out of the way.
	 */
	xfs_log_force(log->l_mp, XFS_LOG_SYNC);

	xlog_recover_process_iunlinks(log);

	/*
	 * Recover any CoW staging blocks that are still referenced by the
	 * ondisk refcount metadata. During mount there cannot be any live
	 * staging extents as we have not permitted any user modifications.
	 * Therefore, it is safe to free them all right now, even on a
	 * read-only mount.
	 */
	error = xfs_reflink_recover_cow(log->l_mp);
	if (error) {
		xfs_alert(log->l_mp,
	"Failed to recover leftover CoW staging extents, err %d.",
				error);
		/*
		 * If we get an error here, make sure the log is shut down
		 * but return zero so that any log items committed since the
		 * end of intents processing can be pushed through the CIL
		 * and AIL.
		 */
		xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
		error = 0;
		goto out_error;
	}

out_error:
	memalloc_nofs_restore(nofs_flags);
	return error;
}

void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (xlog_recovery_needed(log))
		xlog_recover_cancel_intents(log);
}
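The per-LSN drain of the delwri buffer list in xlog_recover_process_ophdr() is the subtle part of the code above, so here is a minimal standalone sketch of the same pattern, assuming a hypothetical stream of (lsn, item) pairs: buffered work is flushed only when the commit LSN changes, never in the middle of a checkpoint. The names (flush_queue, struct item) are illustrative and not part of the XFS code.

#include <stdio.h>
#include <stddef.h>

/* A recovery item stamped with the start LSN of its checkpoint. */
struct item {
	unsigned long long lsn;
	int payload;
};

/* Stand-in for xfs_buf_delwri_submit(): write out everything queued. */
static void flush_queue(int *queued)
{
	if (*queued)
		printf("flushing %d buffered updates\n", *queued);
	*queued = 0;
}

int main(void)
{
	/* Three checkpoints; the first holds several items at LSN 100. */
	struct item stream[] = {
		{ 100, 1 }, { 100, 2 }, { 100, 3 },
		{ 200, 4 }, { 200, 5 },
		{ 300, 6 },
	};
	unsigned long long cur_lsn = 0;
	int queued = 0;
	size_t i;

	for (i = 0; i < sizeof(stream) / sizeof(stream[0]); i++) {
		/*
		 * Drain only when the LSN changes, mirroring the
		 * l_recovery_lsn check: items sharing an LSN must all be
		 * applied before any of their buffers are written back.
		 */
		if (cur_lsn != stream[i].lsn) {
			flush_queue(&queued);
			cur_lsn = stream[i].lsn;
		}
		queued++;	/* "replay" the item into the buffer queue */
	}
	flush_queue(&queued);	/* final drain, as after the last record */
	return 0;
}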
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Stadia controller rumble support.
 *
 * Copyright 2023 Google LLC
 */

#include <linux/hid.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "hid-ids.h"

#define STADIA_FF_REPORT_ID 5

struct stadiaff_device {
	struct hid_device *hid;
	struct hid_report *report;
	spinlock_t lock;
	bool removed;
	uint16_t strong_magnitude;
	uint16_t weak_magnitude;
	struct work_struct work;
};

static void stadiaff_work(struct work_struct *work)
{
	struct stadiaff_device *stadiaff =
		container_of(work, struct stadiaff_device, work);
	struct hid_field *rumble_field = stadiaff->report->field[0];
	unsigned long flags;

	spin_lock_irqsave(&stadiaff->lock, flags);
	rumble_field->value[0] = stadiaff->strong_magnitude;
	rumble_field->value[1] = stadiaff->weak_magnitude;
	spin_unlock_irqrestore(&stadiaff->lock, flags);

	hid_hw_request(stadiaff->hid, stadiaff->report, HID_REQ_SET_REPORT);
}

static int stadiaff_play(struct input_dev *dev, void *data,
			 struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct stadiaff_device *stadiaff = hid_get_drvdata(hid);
	unsigned long flags;

	spin_lock_irqsave(&stadiaff->lock, flags);
	if (!stadiaff->removed) {
		stadiaff->strong_magnitude = effect->u.rumble.strong_magnitude;
		stadiaff->weak_magnitude = effect->u.rumble.weak_magnitude;
		schedule_work(&stadiaff->work);
	}
	spin_unlock_irqrestore(&stadiaff->lock, flags);

	return 0;
}

static int stadiaff_init(struct hid_device *hid)
{
	struct stadiaff_device *stadiaff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct input_dev *dev;
	int error;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	dev = hidinput->input;

	report = hid_validate_values(hid, HID_OUTPUT_REPORT,
				     STADIA_FF_REPORT_ID, 0, 2);
	if (!report)
		return -ENODEV;

	stadiaff = devm_kzalloc(&hid->dev, sizeof(struct stadiaff_device),
				GFP_KERNEL);
	if (!stadiaff)
		return -ENOMEM;

	hid_set_drvdata(hid, stadiaff);

	input_set_capability(dev, EV_FF, FF_RUMBLE);

	error = input_ff_create_memless(dev, NULL, stadiaff_play);
	if (error)
		return error;

	stadiaff->removed = false;
	stadiaff->hid = hid;
	stadiaff->report = report;
	INIT_WORK(&stadiaff->work, stadiaff_work);
	spin_lock_init(&stadiaff->lock);

	hid_info(hid, "Force Feedback for Google Stadia controller\n");

	return 0;
}

static int stadia_probe(struct hid_device *hdev,
			const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		return ret;
	}

	ret = stadiaff_init(hdev);
	if (ret) {
		hid_err(hdev, "force feedback init failed\n");
		hid_hw_stop(hdev);
		return ret;
	}

	return 0;
}

static void stadia_remove(struct hid_device *hid)
{
	struct stadiaff_device *stadiaff = hid_get_drvdata(hid);
	unsigned long flags;

	spin_lock_irqsave(&stadiaff->lock, flags);
	stadiaff->removed = true;
	spin_unlock_irqrestore(&stadiaff->lock, flags);

	cancel_work_sync(&stadiaff->work);

	hid_hw_stop(hid);
}

static const struct hid_device_id stadia_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_GOOGLE,
			 USB_DEVICE_ID_GOOGLE_STADIA) },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE,
			       USB_DEVICE_ID_GOOGLE_STADIA) },
	{ }
};
MODULE_DEVICE_TABLE(hid, stadia_devices);

static struct hid_driver stadia_driver = {
	.name = "stadia",
	.id_table = stadia_devices,
	.probe = stadia_probe,
	.remove = stadia_remove,
};
module_hid_driver(stadia_driver);

MODULE_DESCRIPTION("Google Stadia controller rumble support.");
MODULE_LICENSE("GPL");
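The stadiaff_play()/stadiaff_work() split exists because memless force-feedback play callbacks run in atomic context, where a sleeping transport call like hid_hw_request() is not allowed; the driver therefore records the magnitudes under a spinlock and defers the actual report to a workqueue. Below is a rough userspace analogue of that handoff, assuming POSIX threads; the names play() and send_report() are illustrative only, not part of the HID API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static unsigned int strong, weak;
static bool pending, stop;

/* Stand-in for hid_hw_request(): slow, allowed to sleep. */
static void send_report(unsigned int s, unsigned int w)
{
	usleep(1000);
	printf("report: strong=%u weak=%u\n", s, w);
}

/* Worker thread, the analogue of stadiaff_work(). */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!pending && !stop)
			pthread_cond_wait(&kick, &lock);
		if (pending) {
			unsigned int s = strong, w = weak;

			pending = false;
			/* Do the slow transfer outside the lock. */
			pthread_mutex_unlock(&lock);
			send_report(s, w);
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* The analogue of stadiaff_play(): cheap, never blocks on I/O. */
static void play(unsigned int s, unsigned int w)
{
	pthread_mutex_lock(&lock);
	strong = s;
	weak = w;
	pending = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	play(0x8000, 0x4000);	/* two quick requests may coalesce, */
	play(0, 0);		/* just as repeated FF updates do    */
	usleep(10000);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}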
// SPDX-License-Identifier: GPL-2.0-or-later
/* Iterator helpers.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <linux/netfs.h>
#include "internal.h"

/**
 * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
 * @orig: The original iterator
 * @orig_len: The amount of iterator to copy
 * @new: The iterator to be set up
 * @extraction_flags: Flags to qualify the request
 *
 * Extract the page fragments from the given amount of the source iterator and
 * build up a second iterator that refers to all of those bits.  This allows
 * the original iterator to be disposed of.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * On success, the number of elements in the bvec is returned, the original
 * iterator will have been advanced by the amount extracted.
 *
 * The iov_iter_extract_mode() function should be used to query how cleanup
 * should be performed.
 */
ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
				struct iov_iter *new,
				iov_iter_extraction_t extraction_flags)
{
	struct bio_vec *bv = NULL;
	struct page **pages;
	unsigned int cur_npages;
	unsigned int max_pages;
	unsigned int npages = 0;
	unsigned int i;
	ssize_t ret;
	size_t count = orig_len, offset, len;
	size_t bv_size, pg_size;

	if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
		return -EIO;

	max_pages = iov_iter_npages(orig, INT_MAX);
	bv_size = array_size(max_pages, sizeof(*bv));
	bv = kvmalloc(bv_size, GFP_KERNEL);
	if (!bv)
		return -ENOMEM;

	/* Put the page list at the end of the bvec list storage.  bvec
	 * elements are larger than page pointers, so as long as we work
	 * 0->last, we should be fine.
	 */
	pg_size = array_size(max_pages, sizeof(*pages));
	pages = (void *)bv + bv_size - pg_size;

	while (count && npages < max_pages) {
		ret = iov_iter_extract_pages(orig, &pages, count,
					     max_pages - npages,
					     extraction_flags, &offset);
		if (ret < 0) {
			pr_err("Couldn't get user pages (rc=%zd)\n", ret);
			break;
		}

		if (ret > count) {
			pr_err("get_pages rc=%zd more than %zu\n", ret, count);
			break;
		}

		count -= ret;
		ret += offset;
		cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			pr_err("Out of bvec array capacity (%u vs %u)\n",
			       npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
			bvec_set_page(bv + npages + i, *pages++, len - offset,
				      offset);
			ret -= len;
			offset = 0;
		}

		npages += cur_npages;
	}

	iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
	return npages;
}
EXPORT_SYMBOL_GPL(netfs_extract_user_iter);

/*
 * Select the span of a bvec iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  Returns the size of the span
 * in bytes.
 */
static size_t netfs_limit_bvec(const struct iov_iter *iter, size_t start_offset,
			       size_t max_size, size_t max_segs)
{
	const struct bio_vec *bvecs = iter->bvec;
	unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
	size_t len, span = 0, n = iter->count;
	size_t skip = iter->iov_offset + start_offset;

	if (WARN_ON(!iov_iter_is_bvec(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;

	while (n && ix < nbv && skip) {
		len = bvecs[ix].bv_len;
		if (skip < len)
			break;
		skip -= len;
		n -= len;
		ix++;
	}

	while (n && ix < nbv) {
		len = min3(n, bvecs[ix].bv_len - skip, max_size);
		span += len;
		nsegs++;
		ix++;
		if (span >= max_size || nsegs >= max_segs)
			break;
		skip = 0;
		n -= len;
	}

	return min(span, max_size);
}

/*
 * Select the span of an xarray iterator we're going to use.  Limit it by both
 * maximum size and maximum number of segments.  It is assumed that segments
 * can be larger than a page in size, provided they're physically contiguous.
 * Returns the size of the span in bytes.
 */
static size_t netfs_limit_xarray(const struct iov_iter *iter,
				 size_t start_offset, size_t max_size,
				 size_t max_segs)
{
	struct folio *folio;
	unsigned int nsegs = 0;
	loff_t pos = iter->xarray_start + iter->iov_offset;
	pgoff_t index = pos / PAGE_SIZE;
	size_t span = 0, n = iter->count;

	XA_STATE(xas, iter->xarray, index);

	if (WARN_ON(!iov_iter_is_xarray(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = min(max_size, n - start_offset);

	rcu_read_lock();
	xas_for_each(&xas, folio, ULONG_MAX) {
		size_t offset, flen, len;
		if (xas_retry(&xas, folio))
			continue;
		if (WARN_ON(xa_is_value(folio)))
			break;
		if (WARN_ON(folio_test_hugetlb(folio)))
			break;

		flen = folio_size(folio);
		offset = offset_in_folio(folio, pos);
		len = min(max_size, flen - offset);
		span += len;
		nsegs++;
		if (span >= max_size || nsegs >= max_segs)
			break;
	}

	rcu_read_unlock();
	return min(span, max_size);
}

/*
 * Select the span of a folio queue iterator we're going to use.  Limit it by
 * both maximum size and maximum number of segments.  Returns the size of the
 * span in bytes.
 */
static size_t netfs_limit_folioq(const struct iov_iter *iter,
				 size_t start_offset, size_t max_size,
				 size_t max_segs)
{
	const struct folio_queue *folioq = iter->folioq;
	unsigned int nsegs = 0;
	unsigned int slot = iter->folioq_slot;
	size_t span = 0, n = iter->count;

	if (WARN_ON(!iov_iter_is_folioq(iter)) ||
	    WARN_ON(start_offset > n) ||
	    n == 0)
		return 0;
	max_size = umin(max_size, n - start_offset);

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = folioq->next;
		slot = 0;
	}

	start_offset += iter->iov_offset;
	do {
		size_t flen = folioq_folio_size(folioq, slot);

		if (start_offset < flen) {
			span += flen - start_offset;
			nsegs++;
			start_offset = 0;
		} else {
			start_offset -= flen;
		}
		if (span >= max_size || nsegs >= max_segs)
			break;

		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = folioq->next;
			slot = 0;
		}
	} while (folioq);

	return umin(span, max_size);
}

size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
			size_t max_size, size_t max_segs)
{
	if (iov_iter_is_folioq(iter))
		return netfs_limit_folioq(iter, start_offset, max_size,
					  max_segs);
	if (iov_iter_is_bvec(iter))
		return netfs_limit_bvec(iter, start_offset, max_size,
					max_segs);
	if (iov_iter_is_xarray(iter))
		return netfs_limit_xarray(iter, start_offset, max_size,
					  max_segs);
	BUG();
}
EXPORT_SYMBOL(netfs_limit_iter);
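All three netfs_limit_*() helpers implement the same walk: skip to the starting offset, then accumulate segment lengths until either the byte budget or the segment budget is exhausted. A minimal standalone sketch of that loop over a plain array of segment lengths, using hypothetical values and a made-up limit_span() helper, might look like this:

#include <stdio.h>
#include <stddef.h>

/*
 * Mirror of the netfs_limit_bvec() walk: given segment lengths, a byte
 * count, a starting offset, a maximum span size and a maximum number of
 * segments, return how many bytes may be consumed in one go.
 */
static size_t limit_span(const size_t *seglen, size_t nsegs_total,
			 size_t count, size_t start_offset,
			 size_t max_size, size_t max_segs)
{
	size_t skip = start_offset, span = 0, n = count;
	size_t ix = 0, nsegs = 0, len;

	if (start_offset > n || n == 0)
		return 0;

	/* Skip whole segments covered by the starting offset. */
	while (n && ix < nsegs_total && skip) {
		len = seglen[ix];
		if (skip < len)
			break;
		skip -= len;
		n -= len;
		ix++;
	}

	/* Accumulate until the byte or segment budget runs out. */
	while (n && ix < nsegs_total) {
		len = seglen[ix] - skip;
		if (len > n)
			len = n;
		if (len > max_size)
			len = max_size;
		span += len;
		nsegs++;
		ix++;
		if (span >= max_size || nsegs >= max_segs)
			break;
		skip = 0;
		n -= len;
	}

	return span < max_size ? span : max_size;
}

int main(void)
{
	/* Hypothetical segment lengths, e.g. partial pages in a bvec. */
	size_t segs[] = { 1024, 4096, 4096, 512 };
	size_t total = 1024 + 4096 + 4096 + 512;

	/* Starting 1536 bytes in, capped at 8 KiB and 2 segments: 7680. */
	printf("span = %zu\n",
	       limit_span(segs, 4, total, 1536, 8192, 2));
	return 0;
}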
// SPDX-License-Identifier: GPL-2.0-or-later
/* Keyring handling
 *
 * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include <linux/nsproxy.h>
#include <keys/keyring-type.h>
#include <keys/user-type.h>
#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include <net/net_namespace.h>
#include "internal.h"

/*
 * When plumbing the depths of the key tree, this sets a hard limit on how
 * deep we're willing to go.
 */
#define KEYRING_SEARCH_MAX_DEPTH 6

/*
 * We mark pointers we pass to the associative array with bit 1 set if
 * they're keyrings and clear otherwise.
 */
#define KEYRING_PTR_SUBTYPE	0x2UL

static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
{
	return (unsigned long)x & KEYRING_PTR_SUBTYPE;
}
static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
{
	void *object = assoc_array_ptr_to_leaf(x);
	return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
}
static inline void *keyring_key_to_ptr(struct key *key)
{
	if (key->type == &key_type_keyring)
		return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
	return key;
}

static DEFINE_RWLOCK(keyring_name_lock);

/*
 * Clean up the bits of user_namespace that belong to us.
 */
void key_free_user_ns(struct user_namespace *ns)
{
	write_lock(&keyring_name_lock);
	list_del_init(&ns->keyring_name_list);
	write_unlock(&keyring_name_lock);

	key_put(ns->user_keyring_register);
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
}

/*
 * The keyring key type definition.  Keyrings are simply keys of this type and
 * can be treated as ordinary keys in addition to having their own special
 * operations.
 */
static int keyring_preparse(struct key_preparsed_payload *prep);
static void keyring_free_preparse(struct key_preparsed_payload *prep);
static int keyring_instantiate(struct key *keyring,
			       struct key_preparsed_payload *prep);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
static long keyring_read(const struct key *keyring,
			 char *buffer, size_t buflen);

struct key_type key_type_keyring = {
	.name		= "keyring",
	.def_datalen	= 0,
	.preparse	= keyring_preparse,
	.free_preparse	= keyring_free_preparse,
	.instantiate	= keyring_instantiate,
	.revoke		= keyring_revoke,
	.destroy	= keyring_destroy,
	.describe	= keyring_describe,
	.read		= keyring_read,
};
EXPORT_SYMBOL(key_type_keyring);

/*
 * Semaphore to serialise link/link calls to prevent two link calls in
 * parallel introducing a cycle.
 */
static DEFINE_MUTEX(keyring_serialise_link_lock);

/*
 * Publish the name of a keyring so that it can be found by name (if it has
 * one and it doesn't begin with a dot).
 */
static void keyring_publish_name(struct key *keyring)
{
	struct user_namespace *ns = current_user_ns();

	if (keyring->description &&
	    keyring->description[0] &&
	    keyring->description[0] != '.') {
		write_lock(&keyring_name_lock);
		list_add_tail(&keyring->name_link, &ns->keyring_name_list);
		write_unlock(&keyring_name_lock);
	}
}

/*
 * Preparse a keyring payload
 */
static int keyring_preparse(struct key_preparsed_payload *prep)
{
	return prep->datalen != 0 ? -EINVAL : 0;
}

/*
 * Free a preparse of a user defined key payload
 */
static void keyring_free_preparse(struct key_preparsed_payload *prep)
{
}

/*
 * Initialise a keyring.
 *
 * Returns 0 on success, -EINVAL if given any data.
 */
static int keyring_instantiate(struct key *keyring,
			       struct key_preparsed_payload *prep)
{
	assoc_array_init(&keyring->keys);
	/* make the keyring available by name if it has one */
	keyring_publish_name(keyring);
	return 0;
}

/*
 * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit.  Ideally
 * we'd fold the carry back too, but that requires inline asm.
 */
static u64 mult_64x32_and_fold(u64 x, u32 y)
{
	u64 hi = (u64)(u32)(x >> 32) * y;
	u64 lo = (u64)(u32)(x) * y;
	return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
}

/*
 * Hash a key type and description.
 */
static void hash_key_type_and_desc(struct keyring_index_key *index_key)
{
	const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
	const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
	const char *description = index_key->description;
	unsigned long hash, type;
	u32 piece;
	u64 acc;
	int n, desc_len = index_key->desc_len;

	type = (unsigned long)index_key->type;
	acc = mult_64x32_and_fold(type, desc_len + 13);
	acc = mult_64x32_and_fold(acc, 9207);
	piece = (unsigned long)index_key->domain_tag;
	acc = mult_64x32_and_fold(acc, piece);
	acc = mult_64x32_and_fold(acc, 9207);

	for (;;) {
		n = desc_len;
		if (n <= 0)
			break;
		if (n > 4)
			n = 4;
		piece = 0;
		memcpy(&piece, description, n);
		description += n;
		desc_len -= n;
		acc = mult_64x32_and_fold(acc, piece);
		acc = mult_64x32_and_fold(acc, 9207);
	}

	/* Fold the hash down to 32 bits if need be. */
	hash = acc;
	if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
		hash ^= acc >> 32;

	/* Squidge all the keyrings into a separate part of the tree to
	 * ordinary keys by making sure the lowest level segment in the hash is
	 * zero for keyrings and non-zero otherwise.
	if (index_key->type != &key_type_keyring &&
	    (hash & fan_mask) == 0)
		hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
	else if (index_key->type == &key_type_keyring &&
		 (hash & fan_mask) != 0)
		hash = (hash + (hash << level_shift)) & ~fan_mask;
	index_key->hash = hash;
}

/*
 * Finalise an index key to include a part of the description actually in the
 * index key, to set the domain tag and to calculate the hash.
 */
void key_set_index_key(struct keyring_index_key *index_key)
{
	static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
	size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));

	memcpy(index_key->desc, index_key->description, n);

	if (!index_key->domain_tag) {
		if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
			index_key->domain_tag = current->nsproxy->net_ns->key_domain;
		else
			index_key->domain_tag = &default_domain_tag;
	}

	hash_key_type_and_desc(index_key);
}

/**
 * key_put_tag - Release a ref on a tag.
 * @tag: The tag to release.
 *
 * This releases a reference on the given tag and returns true if that ref
 * was the last one.
 */
bool key_put_tag(struct key_tag *tag)
{
	if (refcount_dec_and_test(&tag->usage)) {
		kfree_rcu(tag, rcu);
		return true;
	}

	return false;
}

/**
 * key_remove_domain - Kill off a key domain and gc its keys
 * @domain_tag: The domain tag to release.
 *
 * This marks a domain tag as being dead and releases a ref on it.  If that
 * wasn't the last reference, the garbage collector is poked to try and delete
 * all keys that were in the domain.
 */
void key_remove_domain(struct key_tag *domain_tag)
{
	domain_tag->removed = true;
	if (!key_put_tag(domain_tag))
		key_schedule_gc_links();
}

/*
 * Build the next index key chunk.
 *
 * We return it one word-sized chunk at a time.
 */
static unsigned long keyring_get_key_chunk(const void *data, int level)
{
	const struct keyring_index_key *index_key = data;
	unsigned long chunk = 0;
	const u8 *d;
	int desc_len = index_key->desc_len, n = sizeof(chunk);

	level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
	switch (level) {
	case 0:
		return index_key->hash;
	case 1:
		return index_key->x;
	case 2:
		return (unsigned long)index_key->type;
	case 3:
		return (unsigned long)index_key->domain_tag;
	default:
		level -= 4;
		if (desc_len <= sizeof(index_key->desc))
			return 0;

		d = index_key->description + sizeof(index_key->desc);
		d += level * sizeof(long);
		desc_len -= sizeof(index_key->desc);
		if (desc_len > n)
			desc_len = n;
		do {
			chunk <<= 8;
			chunk |= *d++;
		} while (--desc_len > 0);
		return chunk;
	}
}

static unsigned long keyring_get_object_key_chunk(const void *object, int level)
{
	const struct key *key = keyring_ptr_to_key(object);
	return keyring_get_key_chunk(&key->index_key, level);
}

static bool keyring_compare_object(const void *object, const void *data)
{
	const struct keyring_index_key *index_key = data;
	const struct key *key = keyring_ptr_to_key(object);

	return key->index_key.type == index_key->type &&
		key->index_key.domain_tag == index_key->domain_tag &&
		key->index_key.desc_len == index_key->desc_len &&
		memcmp(key->index_key.description, index_key->description,
		       index_key->desc_len) == 0;
}
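/* Illustrative note (added commentary, not from the original source): the
 * assoc_array consumes the index key above one word-sized chunk per level.
 * On a 64-bit box the first four chunks are, in order, the hash, index_key->x
 * (which packs desc_len together with the first few description characters),
 * the type pointer and the domain tag pointer; any remaining description
 * bytes are then fed in eight at a time by the default case.
 */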
/*
 * Compare the index keys of a pair of objects and determine the bit position
 * at which they differ - if they differ.
 */
static int keyring_diff_objects(const void *object, const void *data)
{
	const struct key *key_a = keyring_ptr_to_key(object);
	const struct keyring_index_key *a = &key_a->index_key;
	const struct keyring_index_key *b = data;
	unsigned long seg_a, seg_b;
	int level, i;

	level = 0;
	seg_a = a->hash;
	seg_b = b->hash;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;

	/* The number of bits contributed by the hash is controlled by a
	 * constant in the assoc_array headers.  Everything else thereafter we
	 * can deal with as being machine word-size dependent.
	 */
	seg_a = a->x;
	seg_b = b->x;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	/* The next bit may not work on big endian */
	seg_a = (unsigned long)a->type;
	seg_b = (unsigned long)b->type;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	seg_a = (unsigned long)a->domain_tag;
	seg_b = (unsigned long)b->domain_tag;
	if ((seg_a ^ seg_b) != 0)
		goto differ;
	level += sizeof(unsigned long);

	i = sizeof(a->desc);
	if (a->desc_len <= i)
		goto same;

	for (; i < a->desc_len; i++) {
		seg_a = *(unsigned char *)(a->description + i);
		seg_b = *(unsigned char *)(b->description + i);
		if ((seg_a ^ seg_b) != 0)
			goto differ_plus_i;
	}

same:
	return -1;

differ_plus_i:
	level += i;
differ:
	i = level * 8 + __ffs(seg_a ^ seg_b);
	return i;
}

/*
 * Free an object after stripping the keyring flag off of the pointer.
 */
static void keyring_free_object(void *object)
{
	key_put(keyring_ptr_to_key(object));
}

/*
 * Operations for keyring management by the index-tree routines.
 */
static const struct assoc_array_ops keyring_assoc_array_ops = {
	.get_key_chunk		= keyring_get_key_chunk,
	.get_object_key_chunk	= keyring_get_object_key_chunk,
	.compare_object		= keyring_compare_object,
	.diff_objects		= keyring_diff_objects,
	.free_object		= keyring_free_object,
};

/*
 * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
 * and dispose of its data.
 *
 * The garbage collector detects the final key_put(), removes the keyring from
 * the serial number tree and then does RCU synchronisation before coming here,
 * so we shouldn't need to worry about code poking around here with the RCU
 * readlock held by this time.
 */
static void keyring_destroy(struct key *keyring)
{
	if (keyring->description) {
		write_lock(&keyring_name_lock);

		if (keyring->name_link.next != NULL &&
		    !list_empty(&keyring->name_link))
			list_del(&keyring->name_link);

		write_unlock(&keyring_name_lock);
	}

	if (keyring->restrict_link) {
		struct key_restriction *keyres = keyring->restrict_link;

		key_put(keyres->key);
		kfree(keyres);
	}

	assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
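/* Illustrative note (example output, not from the original source): the
 * describe op below feeds the description column of /proc/keys, so a keyring
 * typically shows up there as something like "_ses: 2" when it holds two
 * leaf keys, or "[anon]: empty" for an unnamed, empty keyring.
 */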
/*
 * Describe a keyring for /proc.
 */
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
	if (keyring->description)
		seq_puts(m, keyring->description);
	else
		seq_puts(m, "[anon]");

	if (key_is_positive(keyring)) {
		if (keyring->keys.nr_leaves_on_tree != 0)
			seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
		else
			seq_puts(m, ": empty");
	}
}

struct keyring_read_iterator_context {
	size_t			buflen;
	size_t			count;
	key_serial_t		*buffer;
};

static int keyring_read_iterator(const void *object, void *data)
{
	struct keyring_read_iterator_context *ctx = data;
	const struct key *key = keyring_ptr_to_key(object);

	kenter("{%s,%d},,{%zu/%zu}",
	       key->type->name, key->serial, ctx->count, ctx->buflen);

	if (ctx->count >= ctx->buflen)
		return 1;

	*ctx->buffer++ = key->serial;
	ctx->count += sizeof(key->serial);
	return 0;
}

/*
 * Read a list of key IDs from the keyring's contents in binary form
 *
 * The keyring's semaphore is read-locked by the caller.  This prevents someone
 * from modifying it under us - which could cause us to read key IDs multiple
 * times.
 */
static long keyring_read(const struct key *keyring,
			 char *buffer, size_t buflen)
{
	struct keyring_read_iterator_context ctx;
	long ret;

	kenter("{%d},,%zu", key_serial(keyring), buflen);

	if (buflen & (sizeof(key_serial_t) - 1))
		return -EINVAL;

	/* Copy as many key IDs as fit into the buffer */
	if (buffer && buflen) {
		ctx.buffer = (key_serial_t *)buffer;
		ctx.buflen = buflen;
		ctx.count = 0;
		ret = assoc_array_iterate(&keyring->keys,
					  keyring_read_iterator, &ctx);
		if (ret < 0) {
			kleave(" = %ld [iterate]", ret);
			return ret;
		}
	}

	/* Return the size of the buffer needed */
	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
	if (ret <= buflen)
		kleave("= %ld [ok]", ret);
	else
		kleave("= %ld [buffer too small]", ret);
	return ret;
}

/*
 * Allocate a keyring and link into the destination keyring.
 */
struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
			  const struct cred *cred, key_perm_t perm,
			  unsigned long flags,
			  struct key_restriction *restrict_link,
			  struct key *dest)
{
	struct key *keyring;
	int ret;

	keyring = key_alloc(&key_type_keyring, description,
			    uid, gid, cred, perm, flags, restrict_link);
	if (!IS_ERR(keyring)) {
		ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
		if (ret < 0) {
			key_put(keyring);
			keyring = ERR_PTR(ret);
		}
	}

	return keyring;
}
EXPORT_SYMBOL(keyring_alloc);

/**
 * restrict_link_reject - Give -EPERM to restrict link
 * @keyring: The keyring being added to.
 * @type: The type of key being added.
 * @payload: The payload of the key intended to be added.
 * @restriction_key: Keys providing additional data for evaluating restriction.
 *
 * Reject the addition of any links to a keyring.  It can be overridden by
 * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
 * adding a key to a keyring.
 *
 * This is meant to be stored in a key_restriction structure which is passed
 * in the restrict_link parameter to keyring_alloc().
 */
int restrict_link_reject(struct key *keyring,
			 const struct key_type *type,
			 const union key_payload *payload,
			 struct key *restriction_key)
{
	return -EPERM;
}

/*
 * By default, keys are matched by an exact match on their descriptions.
 */
bool key_default_cmp(const struct key *key,
		     const struct key_match_data *match_data)
{
	return strcmp(key->description, match_data->raw_data) == 0;
}
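/* Illustrative sketch (hypothetical caller, not part of this file): the
 * two-step protocol of keyring_read() above - probe for the size, then read
 * for real.  Via the syscall interface this is what userspace does with
 * keyctl(KEYCTL_READ, ...):
 *
 *	long n = keyctl(KEYCTL_READ, id, NULL, 0);    // returns bytes needed
 *	key_serial_t *ids = malloc(n);
 *	n = keyctl(KEYCTL_READ, id, (char *)ids, n);  // fills in serials
 *
 * A too-short buffer still returns the full size needed, so callers can
 * resize and retry.
 */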
/*
 * Iteration function to consider each key found.
 */
static int keyring_search_iterator(const void *object, void *iterator_data)
{
	struct keyring_search_context *ctx = iterator_data;
	const struct key *key = keyring_ptr_to_key(object);
	unsigned long kflags = READ_ONCE(key->flags);
	short state = READ_ONCE(key->state);

	kenter("{%d}", key->serial);

	/* ignore keys not of this type */
	if (key->type != ctx->index_key.type) {
		kleave(" = 0 [!type]");
		return 0;
	}

	/* skip invalidated, revoked and expired keys */
	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
		time64_t expiry = READ_ONCE(key->expiry);

		if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED))) {
			ctx->result = ERR_PTR(-EKEYREVOKED);
			kleave(" = %d [invrev]", ctx->skipped_ret);
			goto skipped;
		}

		if (expiry && ctx->now >= expiry) {
			if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
				ctx->result = ERR_PTR(-EKEYEXPIRED);
			kleave(" = %d [expire]", ctx->skipped_ret);
			goto skipped;
		}
	}

	/* keys that don't match */
	if (!ctx->match_data.cmp(key, &ctx->match_data)) {
		kleave(" = 0 [!match]");
		return 0;
	}

	/* key must have search permissions */
	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
	    key_task_permission(make_key_ref(key, ctx->possessed),
				ctx->cred, KEY_NEED_SEARCH) < 0) {
		ctx->result = ERR_PTR(-EACCES);
		kleave(" = %d [!perm]", ctx->skipped_ret);
		goto skipped;
	}

	if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
		/* we set a different error code if we pass a negative key */
		if (state < 0) {
			ctx->result = ERR_PTR(state);
			kleave(" = %d [neg]", ctx->skipped_ret);
			goto skipped;
		}
	}

	/* Found */
	ctx->result = make_key_ref(key, ctx->possessed);
	kleave(" = 1 [found]");
	return 1;

skipped:
	return ctx->skipped_ret;
}

/*
 * Search inside a keyring for a key.  We can search by walking to it
 * directly based on its index-key or we can iterate over the entire
 * tree looking for it, based on the match function.
 */
static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
{
	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
		const void *object;

		object = assoc_array_find(&keyring->keys,
					  &keyring_assoc_array_ops,
					  &ctx->index_key);
		return object ? ctx->iterator(object, ctx) : 0;
	}
	return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
}

/*
 * Search a tree of keyrings that point to other keyrings up to the maximum
 * depth.
 */
static bool search_nested_keyrings(struct key *keyring,
				   struct keyring_search_context *ctx)
{
	struct {
		struct key *keyring;
		struct assoc_array_node *node;
		int slot;
	} stack[KEYRING_SEARCH_MAX_DEPTH];

	struct assoc_array_shortcut *shortcut;
	struct assoc_array_node *node;
	struct assoc_array_ptr *ptr;
	struct key *key;
	int sp = 0, slot;

	kenter("{%d},{%s,%s}",
	       keyring->serial,
	       ctx->index_key.type->name,
	       ctx->index_key.description);

#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
	BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
	       (ctx->flags & STATE_CHECKS) == STATE_CHECKS);

	if (ctx->index_key.description)
		key_set_index_key(&ctx->index_key);

	/* Check to see if this top-level keyring is what we are looking for
	 * and whether it is valid or not.
	 */
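	/* Illustrative note (added commentary, not from the original source):
	 * the iterator protocol used below is that 0 means "keep going",
	 * 1 means "found, stop", and ctx->skipped_ret selects what a skipped
	 * key yields.  It is set to 2 while testing the top-level keyring
	 * itself so that a skip there (e.g. a revoked keyring) aborts the
	 * whole search, then reset to 0 so later skips merely continue.
	 */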
	if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
	    keyring_compare_object(keyring, &ctx->index_key)) {
		ctx->skipped_ret = 2;
		switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
		case 1:
			goto found;
		case 2:
			return false;
		default:
			break;
		}
	}

	ctx->skipped_ret = 0;

	/* Start processing a new keyring */
descend_to_keyring:
	kdebug("descend to %d", keyring->serial);
	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto not_this_keyring;

	/* Search through the keys in this keyring before searching its
	 * subtrees.
	 */
	if (search_keyring(keyring, ctx))
		goto found;

	/* Then manually iterate through the keyrings nested in this one.
	 *
	 * Start from the root node of the index tree.  Because of the way the
	 * hash function has been set up, keyrings cluster on the leftmost
	 * branch of the root node (root slot 0) or in the root node itself.
	 * Non-keyrings avoid the leftmost branch of the root entirely (root
	 * slots 1-15).
	 */
	if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
		goto not_this_keyring;

	ptr = READ_ONCE(keyring->keys.root);
	if (!ptr)
		goto not_this_keyring;

	if (assoc_array_ptr_is_shortcut(ptr)) {
		/* If the root is a shortcut, either the keyring only contains
		 * keyring pointers (everything clusters behind root slot 0) or
		 * doesn't contain any keyring pointers.
		 */
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
			goto not_this_keyring;

		ptr = READ_ONCE(shortcut->next_node);
		node = assoc_array_ptr_to_node(ptr);
		goto begin_node;
	}

	node = assoc_array_ptr_to_node(ptr);
	ptr = node->slots[0];
	if (!assoc_array_ptr_is_meta(ptr))
		goto begin_node;

descend_to_node:
	/* Descend to a more distal node in this keyring's content tree and go
	 * through that.
	 */
	kdebug("descend");
	if (assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		ptr = READ_ONCE(shortcut->next_node);
		BUG_ON(!assoc_array_ptr_is_node(ptr));
	}
	node = assoc_array_ptr_to_node(ptr);

begin_node:
	kdebug("begin_node");
	slot = 0;
ascend_to_node:
	/* Go through the slots in a node */
	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
		ptr = READ_ONCE(node->slots[slot]);

		if (assoc_array_ptr_is_meta(ptr)) {
			if (node->back_pointer ||
			    assoc_array_ptr_is_shortcut(ptr))
				goto descend_to_node;
		}

		if (!keyring_ptr_is_keyring(ptr))
			continue;

		key = keyring_ptr_to_key(ptr);

		if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
			if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
				ctx->result = ERR_PTR(-ELOOP);
				return false;
			}
			goto not_this_keyring;
		}

		/* Search a nested keyring */
		if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
		    key_task_permission(make_key_ref(key, ctx->possessed),
					ctx->cred, KEY_NEED_SEARCH) < 0)
			continue;

		/* stack the current position */
		stack[sp].keyring = keyring;
		stack[sp].node = node;
		stack[sp].slot = slot;
		sp++;

		/* begin again with the new keyring */
		keyring = key;
		goto descend_to_keyring;
	}

	/* We've dealt with all the slots in the current node, so now we need
	 * to ascend to the parent and continue processing there.
	 */
	ptr = READ_ONCE(node->back_pointer);
	slot = node->parent_slot;

	if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
		shortcut = assoc_array_ptr_to_shortcut(ptr);
		ptr = READ_ONCE(shortcut->back_pointer);
		slot = shortcut->parent_slot;
	}
	if (!ptr)
		goto not_this_keyring;
	node = assoc_array_ptr_to_node(ptr);
	slot++;

	/* If we've ascended to the root (zero backpointer), we must have just
	 * finished processing the leftmost branch rather than the root slots -
	 * so there can't be any more keyrings for us to find.
	 */
	if (node->back_pointer) {
		kdebug("ascend %d", slot);
		goto ascend_to_node;
	}

	/* The keyring we're looking at was disqualified or didn't contain a
	 * matching key.
	 */
not_this_keyring:
	kdebug("not_this_keyring %d", sp);
	if (sp <= 0) {
		kleave(" = false");
		return false;
	}

	/* Resume the processing of a keyring higher up in the tree */
	sp--;
	keyring = stack[sp].keyring;
	node = stack[sp].node;
	slot = stack[sp].slot + 1;
	kdebug("ascend to %d [%d]", keyring->serial, slot);
	goto ascend_to_node;

	/* We found a viable match */
found:
	key = key_ref_to_ptr(ctx->result);
	key_check(key);
	if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
		key->last_used_at = ctx->now;
		keyring->last_used_at = ctx->now;
		while (sp > 0)
			stack[--sp].keyring->last_used_at = ctx->now;
	}
	kleave(" = true");
	return true;
}

/**
 * keyring_search_rcu - Search a keyring tree for a matching key under RCU
 * @keyring_ref: A pointer to the keyring with possession indicator.
 * @ctx: The keyring search context.
 *
 * Search the supplied keyring tree for a key that matches the criteria given.
 * The root keyring and any linked keyrings must grant Search permission to the
 * caller to be searchable and keys can only be found if they too grant Search
 * to the caller.  The possession flag on the root keyring pointer controls use
 * of the possessor bits in permissions checking of the entire tree.  In
 * addition, the LSM gets to forbid keyring searches and key matches.
 *
 * The search is performed as a breadth-then-depth search up to the prescribed
 * limit (KEYRING_SEARCH_MAX_DEPTH).  The caller must hold the RCU read lock to
 * prevent keyrings from being destroyed or rearranged whilst they are being
 * searched.
 *
 * Keys are matched to the type provided and are then filtered by the match
 * function, which is given the description to use in any way it sees fit.  The
 * match function may use any attributes of a key that it wishes in order to
 * determine the match.  Normally the match function from the key type would be
 * used.
 *
 * RCU can be used to prevent the keyring key lists from disappearing without
 * the need to take lots of locks.
 *
 * Returns a pointer to the found key and increments the key usage count if
 * successful; -EAGAIN if no matching keys were found, or if expired or revoked
 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
 * specified keyring wasn't a keyring.
 *
 * In the case of a successful return, the possession attribute from
 * @keyring_ref is propagated to the returned key reference.
 */
key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
			     struct keyring_search_context *ctx)
{
	struct key *keyring;
	long err;

	ctx->iterator = keyring_search_iterator;
	ctx->possessed = is_key_possessed(keyring_ref);
	ctx->result = ERR_PTR(-EAGAIN);

	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);

	if (keyring->type != &key_type_keyring)
		return ERR_PTR(-ENOTDIR);

	if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
		err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
		if (err < 0)
			return ERR_PTR(err);
	}

	ctx->now = ktime_get_real_seconds();
	if (search_nested_keyrings(keyring, ctx))
		__key_get(key_ref_to_ptr(ctx->result));
	return ctx->result;
}

/**
 * keyring_search - Search the supplied keyring tree for a matching key
 * @keyring: The root of the keyring tree to be searched.
 * @type: The type of key we want to find.
 * @description: The description of the key we want to find.
 * @recurse: True to search the children of @keyring also
 *
 * As keyring_search_rcu() above, but using the current task's credentials and
 * type's default matching function and preferred search method.
 */
key_ref_t keyring_search(key_ref_t keyring,
			 struct key_type *type,
			 const char *description,
			 bool recurse)
{
	struct keyring_search_context ctx = {
		.index_key.type		= type,
		.index_key.description	= description,
		.index_key.desc_len	= strlen(description),
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
	};
	key_ref_t key;
	int ret;

	if (recurse)
		ctx.flags |= KEYRING_SEARCH_RECURSE;
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	rcu_read_lock();
	key = keyring_search_rcu(keyring, &ctx);
	rcu_read_unlock();

	if (type->match_free)
		type->match_free(&ctx.match_data);
	return key;
}
EXPORT_SYMBOL(keyring_search);

static struct key_restriction *keyring_restriction_alloc(
	key_restrict_link_func_t check)
{
	struct key_restriction *keyres =
		kzalloc(sizeof(struct key_restriction), GFP_KERNEL);

	if (!keyres)
		return ERR_PTR(-ENOMEM);

	keyres->check = check;

	return keyres;
}

/*
 * Semaphore to serialise restriction setup to prevent reference count
 * cycles through restriction key pointers.
 */
static DECLARE_RWSEM(keyring_serialise_restrict_sem);

/*
 * Check for restriction cycles that would prevent keyring garbage collection.
 * keyring_serialise_restrict_sem must be held.
 */
static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
					     struct key_restriction *keyres)
{
	while (keyres && keyres->key &&
	       keyres->key->type == &key_type_keyring) {
		if (keyres->key == dest_keyring)
			return true;

		keyres = keyres->key->restrict_link;
	}

	return false;
}
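/* Illustrative sketch (hypothetical caller, not part of this file): a typical
 * in-kernel lookup with keyring_search() above - find a "user" type key by
 * description and drop the reference when done.  my_keyring and
 * "example-desc" are placeholders:
 *
 *	key_ref_t kref;
 *
 *	kref = keyring_search(make_key_ref(my_keyring, true),
 *			      &key_type_user, "example-desc", true);
 *	if (!IS_ERR(kref)) {
 *		struct key *key = key_ref_to_ptr(kref);
 *		// ... use the key ...
 *		key_ref_put(kref);
 *	}
 */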
/**
 * keyring_restrict - Look up and apply a restriction to a keyring
 * @keyring_ref: The keyring to be restricted
 * @type: The key type that will provide the restriction checker.
 * @restriction: The restriction options to apply to the keyring
 *
 * Look up a keyring and apply a restriction to it.  The restriction is managed
 * by the specific key type, but can be configured by the options specified in
 * the restriction string.
 */
int keyring_restrict(key_ref_t keyring_ref, const char *type,
		     const char *restriction)
{
	struct key *keyring;
	struct key_type *restrict_type = NULL;
	struct key_restriction *restrict_link;
	int ret = 0;

	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	if (!type) {
		restrict_link = keyring_restriction_alloc(restrict_link_reject);
	} else {
		restrict_type = key_type_lookup(type);

		if (IS_ERR(restrict_type))
			return PTR_ERR(restrict_type);

		if (!restrict_type->lookup_restriction) {
			ret = -ENOENT;
			goto error;
		}

		restrict_link = restrict_type->lookup_restriction(restriction);
	}

	if (IS_ERR(restrict_link)) {
		ret = PTR_ERR(restrict_link);
		goto error;
	}

	down_write(&keyring->sem);
	down_write(&keyring_serialise_restrict_sem);

	if (keyring->restrict_link) {
		ret = -EEXIST;
	} else if (keyring_detect_restriction_cycle(keyring, restrict_link)) {
		ret = -EDEADLK;
	} else {
		keyring->restrict_link = restrict_link;
		notify_key(keyring, NOTIFY_KEY_SETATTR, 0);
	}

	up_write(&keyring_serialise_restrict_sem);
	up_write(&keyring->sem);

	if (ret < 0) {
		key_put(restrict_link->key);
		kfree(restrict_link);
	}

error:
	if (restrict_type)
		key_type_put(restrict_type);

	return ret;
}
EXPORT_SYMBOL(keyring_restrict);

/*
 * Search the given keyring for a key that might be updated.
 *
 * The caller must guarantee that the keyring is a keyring and that the
 * permission is granted to modify the keyring as no check is made here.  The
 * caller must also hold a lock on the keyring semaphore.
 *
 * Returns a pointer to the found key with usage count incremented if
 * successful and returns NULL if not found.  Revoked and invalidated keys are
 * skipped over.
 *
 * If successful, the possession indicator is propagated from the keyring ref
 * to the returned key reference.
 */
key_ref_t find_key_to_update(key_ref_t keyring_ref,
			     const struct keyring_index_key *index_key)
{
	struct key *keyring, *key;
	const void *object;

	keyring = key_ref_to_ptr(keyring_ref);

	kenter("{%d},{%s,%s}",
	       keyring->serial, index_key->type->name, index_key->description);

	object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
				  index_key);

	if (object)
		goto found;

	kleave(" = NULL");
	return NULL;

found:
	key = keyring_ptr_to_key(object);
	if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
			  (1 << KEY_FLAG_REVOKED))) {
		kleave(" = NULL [x]");
		return NULL;
	}
	__key_get(key);
	kleave(" = {%d}", key->serial);
	return make_key_ref(key, is_key_possessed(keyring_ref));
}

/*
 * Find a keyring with the specified name.
 *
 * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
 * user in the current user namespace are considered.  If @uid_keyring is %true,
 * the keyring additionally must have been allocated as a user or user session
 * keyring; otherwise, it must grant Search permission directly to the caller.
 *
 * Returns a pointer to the keyring with the keyring's refcount having been
 * incremented on success.  -ENOKEY is returned if a key could not be found.
 */
struct key *find_keyring_by_name(const char *name, bool uid_keyring)
{
	struct user_namespace *ns = current_user_ns();
	struct key *keyring;

	if (!name)
		return ERR_PTR(-EINVAL);

	read_lock(&keyring_name_lock);

	/* Search this hash bucket for a keyring with a matching name that
	 * grants Search permission and that hasn't been revoked
	 */
	list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
		if (!kuid_has_mapping(ns, keyring->user->uid))
			continue;

		if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
			continue;

		if (strcmp(keyring->description, name) != 0)
			continue;

		if (uid_keyring) {
			if (!test_bit(KEY_FLAG_UID_KEYRING,
				      &keyring->flags))
				continue;
		} else {
			if (key_permission(make_key_ref(keyring, 0),
					   KEY_NEED_SEARCH) < 0)
				continue;
		}

		/* we've got a match but we might end up racing with
		 * key_cleanup() if the keyring is currently 'dead'
		 * (ie. it has a zero usage count)
		 */
		if (!refcount_inc_not_zero(&keyring->usage))
			continue;
		keyring->last_used_at = ktime_get_real_seconds();
		goto out;
	}

	keyring = ERR_PTR(-ENOKEY);
out:
	read_unlock(&keyring_name_lock);
	return keyring;
}

static int keyring_detect_cycle_iterator(const void *object,
					 void *iterator_data)
{
	struct keyring_search_context *ctx = iterator_data;
	const struct key *key = keyring_ptr_to_key(object);

	kenter("{%d}", key->serial);

	/* We might get a keyring with matching index-key that is nonetheless a
	 * different keyring. */
	if (key != ctx->match_data.raw_data)
		return 0;

	ctx->result = ERR_PTR(-EDEADLK);
	return 1;
}

/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
 * tree A at the topmost level (ie: as a direct child of A).
 *
 * Since we are adding B to A at the top level, checking for cycles should just
 * be a matter of seeing if node A is somewhere in tree B.
 */
static int keyring_detect_cycle(struct key *A, struct key *B)
{
	struct keyring_search_context ctx = {
		.index_key		= A->index_key,
		.match_data.raw_data	= A,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.iterator		= keyring_detect_cycle_iterator,
		.flags			= (KEYRING_SEARCH_NO_STATE_CHECK |
					   KEYRING_SEARCH_NO_UPDATE_TIME |
					   KEYRING_SEARCH_NO_CHECK_PERM |
					   KEYRING_SEARCH_DETECT_TOO_DEEP |
					   KEYRING_SEARCH_RECURSE),
	};

	rcu_read_lock();
	search_nested_keyrings(B, &ctx);
	rcu_read_unlock();
	return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}

/*
 * Lock keyring for link.
 */
int __key_link_lock(struct key *keyring,
		    const struct keyring_index_key *index_key)
	__acquires(&keyring->sem)
	__acquires(&keyring_serialise_link_lock)
{
	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	/* Serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyrings in opposite orders.
	 */
	if (index_key->type == &key_type_keyring)
		mutex_lock(&keyring_serialise_link_lock);

	return 0;
}

/*
 * Lock keyrings for move (link/unlink combination).
 */
int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
		    const struct keyring_index_key *index_key)
	__acquires(&l_keyring->sem)
	__acquires(&u_keyring->sem)
	__acquires(&keyring_serialise_link_lock)
{
	if (l_keyring->type != &key_type_keyring ||
	    u_keyring->type != &key_type_keyring)
		return -ENOTDIR;

	/* We have to be very careful here to take the keyring locks in the
	 * right order, lest we open ourselves to deadlocking against another
	 * move operation.
	 */
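	/* Illustrative note (added commentary, not from the original source):
	 * ordering the two down_write() calls below by keyring address gives
	 * a global lock order.  E.g. if task 1 moves a key from A to B while
	 * task 2 moves one from B to A, both tasks lock min(A, B) first, so
	 * neither can hold one semaphore while waiting forever on the other.
	 */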
	if (l_keyring < u_keyring) {
		down_write(&l_keyring->sem);
		down_write_nested(&u_keyring->sem, 1);
	} else {
		down_write(&u_keyring->sem);
		down_write_nested(&l_keyring->sem, 1);
	}

	/* Serialise link/link calls to prevent parallel calls causing a cycle
	 * when linking two keyrings in opposite orders.
	 */
	if (index_key->type == &key_type_keyring)
		mutex_lock(&keyring_serialise_link_lock);

	return 0;
}

/*
 * Preallocate memory so that a key can be linked into a keyring.
 */
int __key_link_begin(struct key *keyring,
		     const struct keyring_index_key *index_key,
		     struct assoc_array_edit **_edit)
{
	struct assoc_array_edit *edit;
	int ret;

	kenter("%d,%s,%s,",
	       keyring->serial, index_key->type->name, index_key->description);

	BUG_ON(index_key->desc_len == 0);
	BUG_ON(*_edit != NULL);

	*_edit = NULL;

	ret = -EKEYREVOKED;
	if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
		goto error;

	/* Create an edit script that will insert/replace the key in the
	 * keyring tree.
	 */
	edit = assoc_array_insert(&keyring->keys,
				  &keyring_assoc_array_ops,
				  index_key, NULL);
	if (IS_ERR(edit)) {
		ret = PTR_ERR(edit);
		goto error;
	}

	/* If we're not replacing a link in-place then we're going to need some
	 * extra quota.
	 */
	if (!edit->dead_leaf) {
		ret = key_payload_reserve(keyring,
					  keyring->datalen + KEYQUOTA_LINK_BYTES);
		if (ret < 0)
			goto error_cancel;
	}

	*_edit = edit;
	kleave(" = 0");
	return 0;

error_cancel:
	assoc_array_cancel_edit(edit);
error:
	kleave(" = %d", ret);
	return ret;
}

/*
 * Check already instantiated keys aren't going to be a problem.
 *
 * The caller must have called __key_link_begin().  Don't need to call this for
 * keys that were created since __key_link_begin() was called.
 */
int __key_link_check_live_key(struct key *keyring, struct key *key)
{
	if (key->type == &key_type_keyring)
		/* check that we aren't going to create a cycle by linking one
		 * keyring to another */
		return keyring_detect_cycle(keyring, key);
	return 0;
}

/*
 * Link a key into a keyring.
 *
 * Must be called with __key_link_begin() having been called.  Discards any
 * already extant link to matching key if there is one, so that each keyring
 * holds at most one link to any given key of a particular type+description
 * combination.
 */
void __key_link(struct key *keyring, struct key *key,
		struct assoc_array_edit **_edit)
{
	__key_get(key);
	assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
	assoc_array_apply_edit(*_edit);
	*_edit = NULL;
	notify_key(keyring, NOTIFY_KEY_LINKED, key_serial(key));
}

/*
 * Finish linking a key into a keyring.
 *
 * Must be called with __key_link_begin() having been called.
 */
void __key_link_end(struct key *keyring,
		    const struct keyring_index_key *index_key,
		    struct assoc_array_edit *edit)
	__releases(&keyring->sem)
	__releases(&keyring_serialise_link_lock)
{
	BUG_ON(index_key->type == NULL);
	kenter("%d,%s,", keyring->serial, index_key->type->name);

	if (edit) {
		if (!edit->dead_leaf) {
			key_payload_reserve(keyring,
				keyring->datalen - KEYQUOTA_LINK_BYTES);
		}
		assoc_array_cancel_edit(edit);
	}
	up_write(&keyring->sem);

	if (index_key->type == &key_type_keyring)
		mutex_unlock(&keyring_serialise_link_lock);
}

/*
 * Check addition of keys to restricted keyrings.
 */
static int __key_link_check_restriction(struct key *keyring, struct key *key)
{
	if (!keyring->restrict_link || !keyring->restrict_link->check)
		return 0;
	return keyring->restrict_link->check(keyring, key->type, &key->payload,
					     keyring->restrict_link->key);
}
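/* Illustrative sketch (hypothetical caller, not part of this file): the usual
 * pairing of keyring_alloc() and key_link() as used by in-kernel subsystems -
 * allocate an unrestricted keyring, then link an existing key into it.
 * ".example" and some_key are placeholders; the permission mask shown is just
 * one common choice:
 *
 *	struct key *kr;
 *
 *	kr = keyring_alloc(".example", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *			   current_cred(),
 *			   (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *			   KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
 *	if (!IS_ERR(kr)) {
 *		int err = key_link(kr, some_key); // takes a ref on some_key
 *		// ...
 *		key_put(kr);
 *	}
 */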
/**
 * key_link - Link a key to a keyring
 * @keyring: The keyring to make the link in.
 * @key: The key to link to.
 *
 * Make a link in a keyring to a key, such that the keyring holds a reference
 * on that key and the key can potentially be found by searching that keyring.
 *
 * This function will write-lock the keyring's semaphore and will consume some
 * of the user's key data quota to hold the link.
 *
 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
 * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
 * full, -EDQUOT if there is insufficient key data quota remaining to add
 * another link or -ENOMEM if there's insufficient memory.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be made (the keyring should have Write permission and the key Link
 * permission).
 */
int key_link(struct key *keyring, struct key *key)
{
	struct assoc_array_edit *edit = NULL;
	int ret;

	kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));

	key_check(keyring);
	key_check(key);

	ret = __key_link_lock(keyring, &key->index_key);
	if (ret < 0)
		goto error;

	ret = __key_link_begin(keyring, &key->index_key, &edit);
	if (ret < 0)
		goto error_end;

	kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
	ret = __key_link_check_restriction(keyring, key);
	if (ret == 0)
		ret = __key_link_check_live_key(keyring, key);
	if (ret == 0)
		__key_link(keyring, key, &edit);

error_end:
	__key_link_end(keyring, &key->index_key, edit);
error:
	kleave(" = %d {%d,%d}", ret, keyring->serial,
	       refcount_read(&keyring->usage));
	return ret;
}
EXPORT_SYMBOL(key_link);

/*
 * Lock a keyring for unlink.
 */
static int __key_unlink_lock(struct key *keyring)
	__acquires(&keyring->sem)
{
	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);
	return 0;
}

/*
 * Begin the process of unlinking a key from a keyring.
 */
static int __key_unlink_begin(struct key *keyring, struct key *key,
			      struct assoc_array_edit **_edit)
{
	struct assoc_array_edit *edit;

	BUG_ON(*_edit != NULL);

	edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
				  &key->index_key);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	if (!edit)
		return -ENOENT;

	*_edit = edit;
	return 0;
}

/*
 * Apply an unlink change.
 */
static void __key_unlink(struct key *keyring, struct key *key,
			 struct assoc_array_edit **_edit)
{
	assoc_array_apply_edit(*_edit);
	notify_key(keyring, NOTIFY_KEY_UNLINKED, key_serial(key));
	*_edit = NULL;
	key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
}

/*
 * Finish unlinking a key from a keyring.
 */
static void __key_unlink_end(struct key *keyring,
			     struct key *key,
			     struct assoc_array_edit *edit)
	__releases(&keyring->sem)
{
	if (edit)
		assoc_array_cancel_edit(edit);
	up_write(&keyring->sem);
}

/**
 * key_unlink - Unlink the first link to a key from a keyring.
 * @keyring: The keyring to remove the link from.
 * @key: The key the link is to.
 *
 * Remove a link from a keyring to a key.
 *
 * This function will write-lock the keyring's semaphore.
 *
 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
 * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
 * memory.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be removed (the keyring should have Write permission; no permissions are
 * required on the key).
 */
int key_unlink(struct key *keyring, struct key *key)
{
	struct assoc_array_edit *edit = NULL;
	int ret;

	key_check(keyring);
	key_check(key);

	ret = __key_unlink_lock(keyring);
	if (ret < 0)
		return ret;

	ret = __key_unlink_begin(keyring, key, &edit);
	if (ret == 0)
		__key_unlink(keyring, key, &edit);
	__key_unlink_end(keyring, key, edit);
	return ret;
}
EXPORT_SYMBOL(key_unlink);

/**
 * key_move - Move a key from one keyring to another
 * @key: The key to move
 * @from_keyring: The keyring to remove the link from.
 * @to_keyring: The keyring to make the link in.
 * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
 *
 * Make a link in @to_keyring to a key, such that the keyring holds a reference
 * on that key and the key can potentially be found by searching that keyring
 * whilst simultaneously removing a link to the key from @from_keyring.
 *
 * This function will write-lock both keyring's semaphores and will consume
 * some of the user's key data quota to hold the link on @to_keyring.
 *
 * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
 * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
 * keyring is full, -EDQUOT if there is insufficient key data quota remaining
 * to add another link or -ENOMEM if there's insufficient memory.  If
 * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
 * matching key in @to_keyring.
 *
 * It is assumed that the caller has checked that it is permitted for a link to
 * be made (the keyring should have Write permission and the key Link
 * permission).
 */
int key_move(struct key *key,
	     struct key *from_keyring,
	     struct key *to_keyring,
	     unsigned int flags)
{
	struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
	int ret;

	kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);

	if (from_keyring == to_keyring)
		return 0;

	key_check(key);
	key_check(from_keyring);
	key_check(to_keyring);

	ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
	if (ret < 0)
		goto out;
	ret = __key_unlink_begin(from_keyring, key, &from_edit);
	if (ret < 0)
		goto error;
	ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
		goto error;

	ret = __key_link_check_restriction(to_keyring, key);
	if (ret < 0)
		goto error;
	ret = __key_link_check_live_key(to_keyring, key);
	if (ret < 0)
		goto error;

	__key_unlink(from_keyring, key, &from_edit);
	__key_link(to_keyring, key, &to_edit);
error:
	__key_link_end(to_keyring, &key->index_key, to_edit);
	__key_unlink_end(from_keyring, key, from_edit);
out:
	kleave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(key_move);

/**
 * keyring_clear - Clear a keyring
 * @keyring: The keyring to clear.
 *
 * Clear the contents of the specified keyring.
 *
 * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
 */
int keyring_clear(struct key *keyring)
{
	struct assoc_array_edit *edit;
	int ret;

	if (keyring->type != &key_type_keyring)
		return -ENOTDIR;

	down_write(&keyring->sem);

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (IS_ERR(edit)) {
		ret = PTR_ERR(edit);
	} else {
		if (edit)
			assoc_array_apply_edit(edit);
		notify_key(keyring, NOTIFY_KEY_CLEARED, 0);
		key_payload_reserve(keyring, 0);
		ret = 0;
	}

	up_write(&keyring->sem);
	return ret;
}
EXPORT_SYMBOL(keyring_clear);
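/* Illustrative sketch (hypothetical caller, not part of this file): moving a
 * key atomically between two keyrings with key_move() above, failing rather
 * than displacing a same-type, same-description key already present in the
 * destination.  some_key, src_keyring and dst_keyring are placeholders:
 *
 *	int err = key_move(some_key, src_keyring, dst_keyring,
 *			   KEYCTL_MOVE_EXCL);
 *	if (err == -EEXIST)
 *		; // destination already had a matching key
 */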
/*
 * Dispose of the links from a revoked keyring.
 *
 * This is called with the key sem write-locked.
 */
static void keyring_revoke(struct key *keyring)
{
	struct assoc_array_edit *edit;

	edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
	if (!IS_ERR(edit)) {
		if (edit)
			assoc_array_apply_edit(edit);
		key_payload_reserve(keyring, 0);
	}
}

static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
	struct key *key = keyring_ptr_to_key(object);
	time64_t *limit = iterator_data;

	if (key_is_dead(key, *limit))
		return false;
	key_get(key);
	return true;
}

static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
	const struct key *key = keyring_ptr_to_key(object);
	time64_t *limit = iterator_data;

	key_check(key);
	return key_is_dead(key, *limit);
}

/*
 * Garbage collect pointers from a keyring.
 *
 * Not called with any locks held.  The keyring's key struct will not be
 * deallocated under us as only our caller may deallocate it.
 */
void keyring_gc(struct key *keyring, time64_t limit)
{
	int result;

	kenter("%x{%s}", keyring->serial, keyring->description ?: "");

	if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
			      (1 << KEY_FLAG_REVOKED)))
		goto dont_gc;

	/* scan the keyring looking for dead keys */
	rcu_read_lock();
	result = assoc_array_iterate(&keyring->keys,
				     keyring_gc_check_iterator, &limit);
	rcu_read_unlock();
	if (result == true)
		goto do_gc;

dont_gc:
	kleave(" [no gc]");
	return;

do_gc:
	down_write(&keyring->sem);
	assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
		       keyring_gc_select_iterator, &limit);
	up_write(&keyring->sem);
	kleave(" [gc]");
}

/*
 * Garbage collect restriction pointers from a keyring.
 *
 * Keyring restrictions are associated with a key type, and must be cleaned
 * up if the key type is unregistered.  The restriction is altered to always
 * reject additional keys so a keyring cannot be opened up by unregistering
 * a key type.
 *
 * Not called with any keyring locks held.  The keyring's key struct will not
 * be deallocated under us as only our caller may deallocate it.
 *
 * The caller is required to hold key_types_sem and dead_type->sem.  This is
 * fulfilled by key_gc_keytype() holding the locks on behalf of
 * key_garbage_collector(), which it invokes on a workqueue.
 */
void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
{
	struct key_restriction *keyres;

	kenter("%x{%s}", keyring->serial, keyring->description ?: "");

	/*
	 * keyring->restrict_link is only assigned at key allocation time
	 * or with the key type locked, so the only values that could be
	 * concurrently assigned to keyring->restrict_link are for key
	 * types other than dead_type.  Given this, it's ok to check
	 * the key type before acquiring keyring->sem.
	 */
	if (!dead_type || !keyring->restrict_link ||
	    keyring->restrict_link->keytype != dead_type) {
		kleave(" [no restriction gc]");
		return;
	}

	/* Lock the keyring to ensure that a link is not in progress */
	down_write(&keyring->sem);

	keyres = keyring->restrict_link;

	keyres->check = restrict_link_reject;

	key_put(keyres->key);
	keyres->key = NULL;
	keyres->keytype = NULL;

	up_write(&keyring->sem);

	kleave(" [restriction gc]");
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB-to-WWAN Driver for Sierra Wireless modems
 *
 * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
 *			<linux@sierrawireless.com>
 *
 * Portions of this are based on the cdc_ether driver by David Brownell
 * (2003-2005) and Ole Andre Vadla Ravnas (ActiveSync) (2006).
 *
 * IMPORTANT DISCLAIMER: This driver is not commercially supported by
 * Sierra Wireless. Use at your own risk.
 */

#define DRIVER_VERSION "v.2.0"
#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
static const char driver_name[] = "sierra_net";

/* if defined debug messages enabled */
/*#define	DEBUG*/

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <linux/unaligned.h>
#include <linux/usb/usbnet.h>

#define SWI_USB_REQUEST_GET_FW_ATTR	0x06
#define SWI_GET_FW_ATTR_MASK		0x08

/* atomic counter partially included in MAC address to make sure 2 devices
 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
 */
static atomic_t iface_counter = ATOMIC_INIT(0);

/*
 * SYNC Timer Delay definition used to set the expiry time
 */
#define SIERRA_NET_SYNCDELAY (2*HZ)

/* Max. MTU supported. The modem buffers are limited to 1500 */
#define SIERRA_NET_MAX_SUPPORTED_MTU	1500

/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
 * message reception ... and thus the max. received packet.
 * (May be the cause for parse_hip returning -EINVAL)
 */
#define SIERRA_NET_USBCTL_BUF_LEN	1024

/* Overriding the default usbnet rx_urb_size */
#define SIERRA_NET_RX_URB_SIZE		(8 * 1024)

/* Private data structure */
struct sierra_net_data {
	u16 link_up;		/* air link up or down */
	u8 tx_hdr_template[4];	/* part of HIP hdr for tx'd packets */

	u8 sync_msg[4];		/* SYNC message */
	u8 shdwn_msg[4];	/* Shutdown message */

	/* Backpointer to the container */
	struct usbnet *usbnet;

	u8 ifnum;	/* interface number */

/* Bit masks, must be a power of 2 */
#define SIERRA_NET_EVENT_RESP_AVAIL	0x01
#define SIERRA_NET_TIMER_EXPIRY		0x02
	unsigned long kevent_flags;
	struct work_struct sierra_net_kevent;
	struct timer_list sync_timer;	/* For retrying SYNC sequence */
};

struct param {
	int is_present;
	union {
		void  *ptr;
		u32   dword;
		u16   word;
		u8    byte;
	};
};

/* HIP message type */
#define SIERRA_NET_HIP_EXTENDEDID	0x7F
#define SIERRA_NET_HIP_HSYNC_ID		0x60	/* Modem -> host */
#define SIERRA_NET_HIP_RESTART_ID	0x62	/* Modem -> host */
#define SIERRA_NET_HIP_MSYNC_ID		0x20	/* Host -> modem */
#define SIERRA_NET_HIP_SHUTD_ID		0x26	/* Host -> modem */

#define SIERRA_NET_HIP_EXT_IP_IN_ID	0x0202
#define SIERRA_NET_HIP_EXT_IP_OUT_ID	0x0002

/* 3G UMTS Link Sense Indication definitions */
#define SIERRA_NET_HIP_LSI_UMTSID	0x78

/* Reverse Channel Grant Indication HIP message */
#define SIERRA_NET_HIP_RCGI		0x64

/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS	0x01
#define SIERRA_NET_PROTOCOL_UMTS_DS	0x04

/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE	0x00
#define SIERRA_NET_COVERAGE_NOPACKET	0x01

/* LSI Session */
#define SIERRA_NET_SESSION_IDLE		0x00

/* LSI Link types */
#define SIERRA_NET_AS_LINK_TYPE_IPV4	0x00
#define SIERRA_NET_AS_LINK_TYPE_IPV6	0x02

struct lsi_umts {
	u8 protocol;
	u8 unused1;
	__be16 length;
	/* eventually use a union for the rest - assume umts for now */
	u8 coverage;
	u8 network_len;		/* network name len */
	u8 network[40];		/* network name (UCS2, bigendian) */
	u8 session_state;
	u8 unused3[33];
} __packed;

struct lsi_umts_single {
	struct lsi_umts lsi;
	u8 link_type;
	u8 pdp_addr_len;	/* NW-supplied PDP address len */
	u8 pdp_addr[16];	/* NW-supplied PDP address (bigendian) */
	u8 unused4[23];
	u8 dns1_addr_len;	/* NW-supplied 1st DNS address len (bigendian) */
	u8 dns1_addr[16];	/* NW-supplied 1st DNS address */
	u8 dns2_addr_len;	/* NW-supplied 2nd DNS address len */
	u8 dns2_addr[16];	/* NW-supplied 2nd DNS address (bigendian) */
	u8 wins1_addr_len;	/* NW-supplied 1st Wins address len */
	u8 wins1_addr[16];	/* NW-supplied 1st Wins address (bigendian) */
	u8 wins2_addr_len;	/* NW-supplied 2nd Wins address len */
	u8 wins2_addr[16];	/* NW-supplied 2nd Wins address (bigendian) */
	u8 unused5[4];
	u8 gw_addr_len;		/* NW-supplied GW address len */
	u8 gw_addr[16];		/* NW-supplied GW address (bigendian) */
	u8 reserved[8];
} __packed;

struct lsi_umts_dual {
	struct lsi_umts lsi;
	u8 pdp_addr4_len;	/* NW-supplied PDP IPv4 address len */
	u8 pdp_addr4[4];	/* NW-supplied PDP IPv4 address (bigendian) */
	u8 pdp_addr6_len;	/* NW-supplied PDP IPv6 address len */
	u8 pdp_addr6[16];	/* NW-supplied PDP IPv6 address (bigendian) */
	u8 unused4[23];
	u8 dns1_addr4_len;	/* NW-supplied 1st DNS v4 address len (bigendian) */
	u8 dns1_addr4[4];	/* NW-supplied 1st DNS v4 address */
	u8 dns1_addr6_len;	/* NW-supplied 1st DNS v6 address len */
	u8 dns1_addr6[16];	/* NW-supplied 1st DNS v6 address (bigendian) */
	u8 dns2_addr4_len;	/* NW-supplied 2nd DNS v4 address len (bigendian) */
	u8 dns2_addr4[4];	/* NW-supplied 2nd DNS v4 address */
	u8 dns2_addr6_len;
				/* NW-supplied 2nd DNS v6 address len */
	u8 dns2_addr6[16];	/* NW-supplied 2nd DNS v6 address (bigendian) */
	u8 unused5[68];
} __packed;

#define SIERRA_NET_LSI_COMMON_LEN	4
#define SIERRA_NET_LSI_UMTS_LEN		(sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
	(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
#define SIERRA_NET_LSI_UMTS_DS_LEN	(sizeof(struct lsi_umts_dual))
#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
	(SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)

/* Our own net device operations structure */
static const struct net_device_ops sierra_net_device_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_change_mtu		= usbnet_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/* get private data associated with passed in usbnet device */
static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
{
	return (struct sierra_net_data *)dev->data[0];
}

/* set private data associated with passed in usbnet device */
static inline void sierra_net_set_private(struct usbnet *dev,
					  struct sierra_net_data *priv)
{
	dev->data[0] = (unsigned long)priv;
}

/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
	return skb->protocol == cpu_to_be16(ETH_P_IP) ||
	       skb->protocol == cpu_to_be16(ETH_P_IPV6);
}

/*
 * check passed in packet and make sure that:
 *  - it is linear (no scatter/gather)
 *  - it is ethernet (mac_header properly set)
 */
static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
{
	skb_reset_mac_header(skb); /* ethernet header */

	if (skb_is_nonlinear(skb)) {
		netdev_err(dev->net, "Non linear buffer-dropping\n");
		return 0;
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		return 0;
	skb->protocol = eth_hdr(skb)->h_proto;

	return 1;
}

static const u8 *save16bit(struct param *p, const u8 *datap)
{
	p->is_present = 1;
	p->word = get_unaligned_be16(datap);
	return datap + sizeof(p->word);
}

static const u8 *save8bit(struct param *p, const u8 *datap)
{
	p->is_present = 1;
	p->byte = *datap;
	return datap + sizeof(p->byte);
}

/*----------------------------------------------------------------------------*
 *                              BEGIN HIP                                      *
 *----------------------------------------------------------------------------*/
/* HIP header */
#define SIERRA_NET_HIP_HDR_LEN		4
/* Extended HIP header */
#define SIERRA_NET_HIP_EXT_HDR_LEN	6

struct hip_hdr {
	int	hdrlen;
	struct param payload_len;
	struct param msgid;
	struct param msgspecific;
	struct param extmsgid;
};

static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
{
	const u8 *curp = buf;
	int padded;

	if (buflen < SIERRA_NET_HIP_HDR_LEN)
		return -EPROTO;

	curp = save16bit(&hh->payload_len, curp);
	curp = save8bit(&hh->msgid, curp);
	curp = save8bit(&hh->msgspecific, curp);

	padded = hh->msgid.byte & 0x80;
	hh->msgid.byte &= 0x7F;			/* 7 bits */

	hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
	if (hh->extmsgid.is_present) {
		if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
			return -EPROTO;

		hh->payload_len.word &= 0x3FFF;	/* 14 bits */

		curp = save16bit(&hh->extmsgid, curp);
		hh->extmsgid.word &= 0x03FF;	/* 10 bits */

		hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
	} else {
		hh->payload_len.word &= 0x07FF;	/* 11 bits */
		hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
	}

	if (padded) {
		hh->hdrlen++;
		hh->payload_len.word--;
	}

	/* if real packet shorter than the claimed length */
	if (buflen < (hh->hdrlen + hh->payload_len.word))
		return -EINVAL;

	return 0;
}
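/* Illustrative note (worked example, not from the original source): given the
 * basic 4-byte HIP header { 0x00, 0x08, 0xF8, 0x05 }, parse_hip() above
 * yields payload_len = 8 and msgid = 0xF8 & 0x7F = 0x78 (the LSI indication),
 * with the 0x80 pad bit set, so hdrlen becomes 5 and payload_len drops to 7.
 * Any buffer shorter than hdrlen + payload_len is rejected with -EINVAL.
 */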
static void build_hip(u8 *buf, const u16 payloadlen,
		      struct sierra_net_data *priv)
{
	/* the following doesn't have the full functionality. We
	 * currently build only one kind of header, so it is faster this way
	 */
	put_unaligned_be16(payloadlen, buf);
	memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
}
/*----------------------------------------------------------------------------*
 *                              END HIP                                        *
 *----------------------------------------------------------------------------*/

static int sierra_net_send_cmd(struct usbnet *dev,
			       u8 *cmd, int cmdlen, const char *cmd_name)
{
	struct sierra_net_data *priv = sierra_net_get_private(dev);
	int status;

	status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND,
				  USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
				  0, priv->ifnum, cmd, cmdlen);

	if (status != cmdlen && status != -ENODEV)
		netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);

	return status;
}

static int sierra_net_send_sync(struct usbnet *dev)
{
	int status;
	struct sierra_net_data *priv = sierra_net_get_private(dev);

	dev_dbg(&dev->udev->dev, "%s", __func__);

	status = sierra_net_send_cmd(dev, priv->sync_msg,
				     sizeof(priv->sync_msg), "SYNC");
	return status;
}

static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
{
	dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
	priv->tx_hdr_template[0] = 0x3F;
	priv->tx_hdr_template[1] = ctx_ix;
	*((__be16 *)&priv->tx_hdr_template[2]) =
		cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
}

static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
	struct lsi_umts *lsi = (struct lsi_umts *)data;
	u32 expected_length;

	if (datalen < sizeof(struct lsi_umts_single)) {
		netdev_err(dev->net, "%s: Data length %d, exp >= %zu\n",
			   __func__, datalen, sizeof(struct lsi_umts_single));
		return -1;
	}

	/* Validate the session state */
	if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
		netdev_err(dev->net, "Session idle, 0x%02x\n",
			   lsi->session_state);
		return 0;
	}

	/* Validate the protocol - only support UMTS for now */
	if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
		struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;

		/* Validate the link type */
		if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
		    single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
			netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
				   single->link_type);
			return -1;
		}
		expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
	} else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
		expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
	} else {
		netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
			   lsi->protocol);
		return -1;
	}

	if (be16_to_cpu(lsi->length) != expected_length) {
		netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
			   __func__, be16_to_cpu(lsi->length), expected_length);
		return -1;
	}

	/* Validate the coverage */
	if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
	    lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
		netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
		return 0;
	}

	/* Set link_sense true */
	return 1;
}

static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
				  struct hip_hdr *hh)
{
	struct sierra_net_data *priv = sierra_net_get_private(dev);
	int link_up;

	link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
				       hh->payload_len.word);
	if (link_up < 0) {
		netdev_err(dev->net, "Invalid LSI\n");
		return;
	}
	if (link_up) {
		sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
		priv->link_up = 1;
	} else {
		priv->link_up = 0;
	}
	usbnet_link_change(dev, link_up, 0);
}
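/* Illustrative note (worked example, not from the original source): after
 * sierra_net_set_ctx_index(priv, 1), the 4-byte tx_hdr_template above is
 * { 0x3F, 0x01, 0x00, 0x02 }, so build_hip() prefixes an outgoing frame of,
 * say, 1400 bytes with { 0x05, 0x78, 0x3F, 0x01, 0x00, 0x02 }: a big-endian
 * payload length followed by the template.
 */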
*dev) { int status; struct sierra_net_data *priv = sierra_net_get_private(dev); dev_dbg(&dev->udev->dev, "%s", __func__); /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the * firmware restart itself. After restarting, the modem will respond * with the SIERRA_NET_HIP_RESTART_ID indication. The driver continues * sending MSYNC commands every few seconds until it receives the * RESTART event from the firmware */ /* tell modem we are ready */ status = sierra_net_send_sync(dev); if (status < 0) netdev_err(dev->net, "Send SYNC failed, status %d\n", status); status = sierra_net_send_sync(dev); if (status < 0) netdev_err(dev->net, "Send SYNC failed, status %d\n", status); /* Now, start a timer and make sure we get the Restart Indication */ priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY; add_timer(&priv->sync_timer); } static void sierra_net_kevent(struct work_struct *work) { struct sierra_net_data *priv = container_of(work, struct sierra_net_data, sierra_net_kevent); struct usbnet *dev = priv->usbnet; int len; int err; u8 *buf; u8 ifnum; if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) { clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags); /* Query the modem for the LSI message */ buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL); if (!buf) return; ifnum = priv->ifnum; len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_CDC_GET_ENCAPSULATED_RESPONSE, USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN, USB_CTRL_SET_TIMEOUT); if (len < 0) { netdev_err(dev->net, "usb_control_msg failed, status %d\n", len); } else { struct hip_hdr hh; dev_dbg(&dev->udev->dev, "%s: Received status message," " %04x bytes", __func__, len); err = parse_hip(buf, len, &hh); if (err) { netdev_err(dev->net, "%s: Bad packet," " parse result %d\n", __func__, err); kfree(buf); return; } /* Validate packet length */ if (len != hh.hdrlen + hh.payload_len.word) { netdev_err(dev->net, "%s: Bad packet, received" " %d, expected %d\n", __func__, len, hh.hdrlen + hh.payload_len.word); kfree(buf); return; } /* Switch on received message types */ switch (hh.msgid.byte) { case SIERRA_NET_HIP_LSI_UMTSID: dev_dbg(&dev->udev->dev, "LSI for ctx:%d", hh.msgspecific.byte); sierra_net_handle_lsi(dev, buf, &hh); break; case SIERRA_NET_HIP_RESTART_ID: dev_dbg(&dev->udev->dev, "Restart reported: %d," " stopping sync timer", hh.msgspecific.byte); /* Got sync resp - stop timer & clear mask */ del_timer_sync(&priv->sync_timer); clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); break; case SIERRA_NET_HIP_HSYNC_ID: dev_dbg(&dev->udev->dev, "SYNC received"); err = sierra_net_send_sync(dev); if (err < 0) netdev_err(dev->net, "Send SYNC failed %d\n", err); break; case SIERRA_NET_HIP_EXTENDEDID: netdev_err(dev->net, "Unrecognized HIP msg, " "extmsgid 0x%04x\n", hh.extmsgid.word); break; case SIERRA_NET_HIP_RCGI: /* Ignored */ break; default: netdev_err(dev->net, "Unrecognized HIP msg, " "msgid 0x%02x\n", hh.msgid.byte); break; } } kfree(buf); } /* The sync timer bit might be set */ if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) { clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags); dev_dbg(&dev->udev->dev, "Deferred sync timer expiry"); sierra_net_dosync(priv->usbnet); } if (priv->kevent_flags) dev_dbg(&dev->udev->dev, "sierra_net_kevent done, " "kevent_flags = 0x%lx", priv->kevent_flags); } static void sierra_net_defer_kevent(struct usbnet *dev, int work) { struct sierra_net_data *priv = sierra_net_get_private(dev); set_bit(work, 
&priv->kevent_flags); schedule_work(&priv->sierra_net_kevent); } /* * Sync Retransmit Timer Handler. On expiry, kick the work queue */ static void sierra_sync_timer(struct timer_list *t) { struct sierra_net_data *priv = from_timer(priv, t, sync_timer); struct usbnet *dev = priv->usbnet; dev_dbg(&dev->udev->dev, "%s", __func__); /* Kick the tasklet */ sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY); } static void sierra_net_status(struct usbnet *dev, struct urb *urb) { struct usb_cdc_notification *event; dev_dbg(&dev->udev->dev, "%s", __func__); if (urb->actual_length < sizeof *event) return; /* Add cases to handle other standard notifications. */ event = urb->transfer_buffer; switch (event->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: case USB_CDC_NOTIFY_SPEED_CHANGE: /* USB 305 sends those */ break; case USB_CDC_NOTIFY_RESPONSE_AVAILABLE: sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL); break; default: netdev_err(dev->net, ": unexpected notification %02x!\n", event->bNotificationType); break; } } static void sierra_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { /* Inherit standard device info */ usbnet_get_drvinfo(net, info); strscpy(info->driver, driver_name, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); } static u32 sierra_net_get_link(struct net_device *net) { struct usbnet *dev = netdev_priv(net); /* Report link is down whenever the interface is down */ return sierra_net_get_private(dev)->link_up && netif_running(net); } static const struct ethtool_ops sierra_net_ethtool_ops = { .get_drvinfo = sierra_net_get_drvinfo, .get_link = sierra_net_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .nway_reset = usbnet_nway_reset, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) { int result = 0; __le16 attrdata; result = usbnet_read_cmd(dev, /* _u8 vendor specific request */ SWI_USB_REQUEST_GET_FW_ATTR, USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ 0x0000, /* __u16 value not used */ 0x0000, /* __u16 index not used */ &attrdata, /* char *data */ sizeof(attrdata) /* __u16 size */ ); if (result < 0) return -EIO; *datap = le16_to_cpu(attrdata); return result; } /* * collects the bulk endpoints, the status endpoint. 
*/ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) { u8 ifacenum; u8 numendpoints; u16 fwattr = 0; int status; struct sierra_net_data *priv; static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; u8 mod[2]; dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d", numendpoints); return -ENODEV; } /* Status endpoint set in usbnet_get_endpoints() */ dev->status = NULL; status = usbnet_get_endpoints(dev, intf); if (status < 0) { dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)", status); return -ENODEV; } /* Initialize sierra private data */ priv = kzalloc(sizeof *priv, GFP_KERNEL); if (!priv) return -ENOMEM; priv->usbnet = dev; priv->ifnum = ifacenum; dev->net->netdev_ops = &sierra_net_device_ops; /* change MAC addr to include, ifacenum, and to be unique */ mod[0] = atomic_inc_return(&iface_counter); mod[1] = ifacenum; dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2); /* prepare shutdown message template */ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); /* set context index initially to 0 - prepares tx hdr template */ sierra_net_set_ctx_index(priv, 0); /* prepare sync message template */ memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE); dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; dev->net->max_mtu = SIERRA_NET_MAX_SUPPORTED_MTU; /* Set up the netdev */ dev->net->flags |= IFF_NOARP; dev->net->ethtool_ops = &sierra_net_ethtool_ops; netif_carrier_off(dev->net); sierra_net_set_private(dev, priv); priv->kevent_flags = 0; /* Use the shared workqueue */ INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent); /* Only need to do this once */ timer_setup(&priv->sync_timer, sierra_sync_timer, 0); /* verify fw attributes */ status = sierra_net_get_fw_attr(dev, &fwattr); dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr); /* test whether firmware supports DHCP */ if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) { /* found incompatible firmware version */ dev_err(&dev->udev->dev, "Incompatible driver and firmware" " versions\n"); kfree(priv); return -ENODEV; } return 0; } static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) { int status; struct sierra_net_data *priv = sierra_net_get_private(dev); dev_dbg(&dev->udev->dev, "%s", __func__); /* kill the timer and work */ timer_shutdown_sync(&priv->sync_timer); cancel_work_sync(&priv->sierra_net_kevent); /* tell modem we are going away */ status = sierra_net_send_cmd(dev, priv->shdwn_msg, sizeof(priv->shdwn_msg), "Shutdown"); if (status < 0) netdev_err(dev->net, "usb_control_msg failed, status %d\n", status); usbnet_status_stop(dev); sierra_net_set_private(dev, NULL); kfree(priv); } static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev, struct sk_buff *skb, int len) { struct sk_buff *new_skb; /* clone skb */ new_skb = skb_clone(skb, GFP_ATOMIC); /* remove len bytes from original */ 
skb_pull(skb, len); /* trim next packet to it's length */ if (new_skb) { skb_trim(new_skb, len); } else { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "failed to get skb\n"); dev->net->stats.rx_dropped++; } return new_skb; } /* ---------------------------- Receive data path ----------------------*/ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { int err; struct hip_hdr hh; struct sk_buff *new_skb; dev_dbg(&dev->udev->dev, "%s", __func__); /* could contain multiple packets */ while (likely(skb->len)) { err = parse_hip(skb->data, skb->len, &hh); if (err) { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "Invalid HIP header %d\n", err); /* dev->net->stats.rx_errors incremented by caller */ dev->net->stats.rx_length_errors++; return 0; } /* Validate Extended HIP header */ if (!hh.extmsgid.is_present || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); dev->net->stats.rx_frame_errors++; /* dev->net->stats.rx_errors incremented by caller */ return 0; } skb_pull(skb, hh.hdrlen); /* We are going to accept this packet, prepare it. * In case protocol is IPv6, keep it, otherwise force IPv4. */ skb_reset_mac_header(skb); if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); eth_zero_addr(eth_hdr(skb)->h_source); memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); /* Last packet in batch handled by usbnet */ if (hh.payload_len.word == skb->len) return 1; new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word); if (new_skb) usbnet_skb_return(dev, new_skb); } /* while */ return 0; } /* ---------------------------- Transmit data path ----------------------*/ static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct sierra_net_data *priv = sierra_net_get_private(dev); u16 len; bool need_tail; BUILD_BUG_ON(sizeof_field(struct usbnet, data) < sizeof(struct cdc_state)); dev_dbg(&dev->udev->dev, "%s", __func__); if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) { /* enough head room as is? 
*/ if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) { /* Save the Eth/IP length and set up HIP hdr */ len = skb->len; skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN); /* Handle ZLP issue */ need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN) % dev->maxpacket == 0); if (need_tail) { if (unlikely(skb_tailroom(skb) == 0)) { netdev_err(dev->net, "tx_fixup:" "no room for packet\n"); dev_kfree_skb_any(skb); return NULL; } else { skb->data[skb->len] = 0; __skb_put(skb, 1); len = len + 1; } } build_hip(skb->data, len, priv); return skb; } else { /* * compensate in the future if necessary */ netdev_err(dev->net, "tx_fixup: no room for HIP\n"); } /* headroom */ } if (!priv->link_up) dev->net->stats.tx_carrier_errors++; /* tx_dropped incremented by usbnet */ /* filter the packet out, release it */ dev_kfree_skb_any(skb); return NULL; } static const struct driver_info sierra_net_info_direct_ip = { .description = "Sierra Wireless USB-to-WWAN Modem", .flags = FLAG_WWAN | FLAG_SEND_ZLP, .bind = sierra_net_bind, .unbind = sierra_net_unbind, .status = sierra_net_status, .rx_fixup = sierra_net_rx_fixup, .tx_fixup = sierra_net_tx_fixup, }; static int sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod) { int ret; ret = usbnet_probe(udev, prod); if (ret == 0) { struct usbnet *dev = usb_get_intfdata(udev); ret = usbnet_status_start(dev, GFP_KERNEL); if (ret == 0) { /* Interrupt URB now set up; initiate sync sequence */ sierra_net_dosync(dev); } } return ret; } #define DIRECT_IP_DEVICE(vend, prod) \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip} static const struct usb_device_id products[] = { DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ {}, /* last item */ }; MODULE_DEVICE_TABLE(usb, products); /* We are based on usbnet, so let it handle the USB driver specifics */ static struct usb_driver sierra_net_driver = { .name = "sierra_net", .id_table = products, .probe = sierra_net_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .no_dynamic_id = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(sierra_net_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL");
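The transmit path above always prepends the same six-byte extended HIP header: a big-endian payload length written by put_unaligned_be16(), followed by the four template bytes prepared in sierra_net_set_ctx_index(). The standalone sketch below reproduces that layout; the message-id constant here is a placeholder, since the real SIERRA_NET_HIP_EXT_IP_OUT_ID value is defined earlier in the driver and is not part of this excerpt.

#include <stdint.h>
#include <stdio.h>

#define EXT_IP_OUT_ID 0x0002 /* placeholder, not the driver's actual value */

/* Mirror of build_hip() plus the template from sierra_net_set_ctx_index(). */
static void sketch_build_hip(uint8_t *buf, uint16_t payloadlen, uint8_t ctx_ix)
{
	buf[0] = payloadlen >> 8;      /* put_unaligned_be16(payloadlen, buf) */
	buf[1] = payloadlen & 0xff;
	buf[2] = 0x3F;                 /* tx_hdr_template[0] */
	buf[3] = ctx_ix;               /* context index reported in the LSI */
	buf[4] = EXT_IP_OUT_ID >> 8;   /* cpu_to_be16() of the message id */
	buf[5] = EXT_IP_OUT_ID & 0xff;
}

int main(void)
{
	uint8_t hdr[6];
	int i;

	sketch_build_hip(hdr, 1500, 0);
	for (i = 0; i < 6; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}

This also makes the ZLP handling in sierra_net_tx_fixup() easy to check: the single pad byte is appended exactly when the payload length plus these six header bytes would land on a USB max-packet boundary.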
/* SPDX-License-Identifier: GPL-2.0 */ /* * Variant of atomic_t specialized for reference counts. * * The interface matches the atomic_t interface (to aid in porting) but only * provides the few functions one should use for reference counting. * * Saturation semantics * ==================== * * refcount_t differs from atomic_t in that the counter saturates at * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the * counter and causing 'spurious' use-after-free issues. In order to avoid the * cost associated with introducing cmpxchg() loops into all of the saturating * operations, we temporarily allow the counter to take on an unchecked value * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow * or overflow has occurred. Although this is racy when multiple threads * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly * equidistant from 0 and INT_MAX we minimise the scope for error: * * INT_MAX REFCOUNT_SATURATED UINT_MAX * 0 (0x7fff_ffff) (0xc000_0000) (0xffff_ffff) * +--------------------------------+----------------+----------------+ * <---------- bad value! ----------> * * (in a signed view of the world, the "bad value" range corresponds to * a negative counter value). * * As an example, consider a refcount_inc() operation that causes the counter * to overflow: * * int old = atomic_fetch_add_relaxed(r); * // old is INT_MAX, refcount now INT_MIN (0x8000_0000) * if (old < 0) * atomic_set(r, REFCOUNT_SATURATED); * * If another thread also performs a refcount_inc() operation between the two * atomic operations, then the count will continue to edge closer to 0. If it * reaches a value of 1 before /any/ of the threads reset it to the saturated * value, then a concurrent refcount_dec_and_test() may erroneously free the * underlying object. * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
* With the current PID limit, if no batched refcounting operations are used and * the attacker can't repeatedly trigger kernel oopses in the middle of refcount * operations, this makes it impossible for a saturated refcount to leave the * saturation range, even if it is possible for multiple uses of the same * refcount to nest in the context of a single task: * * (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT = * 0x40000000 / 0x400000 = 0x100 = 256 * * If hundreds of references are added/removed with a single refcounting * operation, it may potentially be possible to leave the saturation range; but * given the precise timing details involved with the round-robin scheduling of * each thread manipulating the refcount and the need to hit the race multiple * times in succession, there doesn't appear to be a practical avenue of attack * even if using refcount_add() operations with larger increments. * * Memory ordering * =============== * * Memory ordering rules are slightly relaxed wrt regular atomic_t functions * and provide only what is strictly required for refcounts. * * The increments are fully relaxed; these will not provide ordering. The * rationale is that whatever is used to obtain the object we're increasing the * reference count on will provide the ordering. For locked data structures, * it's the lock acquire, for RCU/lockless data structures it's the dependent * load. * * Do note that inc_not_zero() provides a control dependency which will order * future stores against the inc; this ensures we'll never modify the object * if we did not in fact acquire a reference. * * The decrements will provide release order, such that all the prior loads and * stores will be issued before; it also provides a control dependency, which * will order us against the subsequent free(). * * The control dependency is against the load of the cmpxchg (ll/sc) that * succeeded. This means the stores aren't fully ordered, but this is fine * because the 1->0 transition indicates no concurrency. * * Note that the allocator is responsible for ordering things between free() * and alloc(). * * The decrements dec_and_test() and sub_and_test() also provide acquire * ordering on success.
* */ #ifndef _LINUX_REFCOUNT_H #define _LINUX_REFCOUNT_H #include <linux/atomic.h> #include <linux/bug.h> #include <linux/compiler.h> #include <linux/limits.h> #include <linux/refcount_types.h> #include <linux/spinlock_types.h> struct mutex; #define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), } #define REFCOUNT_MAX INT_MAX #define REFCOUNT_SATURATED (INT_MIN / 2) enum refcount_saturation_type { REFCOUNT_ADD_NOT_ZERO_OVF, REFCOUNT_ADD_OVF, REFCOUNT_ADD_UAF, REFCOUNT_SUB_UAF, REFCOUNT_DEC_LEAK, }; void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t); /** * refcount_set - set a refcount's value * @r: the refcount * @n: value to which the refcount will be set */ static inline void refcount_set(refcount_t *r, int n) { atomic_set(&r->refs, n); } /** * refcount_read - get a refcount's value * @r: the refcount * * Return: the refcount's value */ static inline unsigned int refcount_read(const refcount_t *r) { return atomic_read(&r->refs); } static inline __must_check __signed_wrap bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) { int old = refcount_read(r); do { if (!old) break; } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i)); if (oldp) *oldp = old; if (unlikely(old < 0 || old + i < 0)) refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF); return old; } /** * refcount_add_not_zero - add a value to a refcount unless it is 0 * @i: the value to add to the refcount * @r: the refcount * * Will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. * * Return: false if the passed refcount is 0, true otherwise */ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) { return __refcount_add_not_zero(i, r, NULL); } static inline __signed_wrap void __refcount_add(int i, refcount_t *r, int *oldp) { int old = atomic_fetch_add_relaxed(i, &r->refs); if (oldp) *oldp = old; if (unlikely(!old)) refcount_warn_saturate(r, REFCOUNT_ADD_UAF); else if (unlikely(old < 0 || old + i < 0)) refcount_warn_saturate(r, REFCOUNT_ADD_OVF); } /** * refcount_add - add a value to a refcount * @i: the value to add to the refcount * @r: the refcount * * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. */ static inline void refcount_add(int i, refcount_t *r) { __refcount_add(i, r, NULL); } static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) { return __refcount_add_not_zero(1, r, oldp); } /** * refcount_inc_not_zero - increment a refcount unless it is 0 * @r: the refcount to increment * * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED * and WARN. 
* * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency * and thereby orders future stores. See the comment on top. * * Return: true if the increment was successful, false otherwise */ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) { return __refcount_inc_not_zero(r, NULL); } static inline void __refcount_inc(refcount_t *r, int *oldp) { __refcount_add(1, r, oldp); } /** * refcount_inc - increment a refcount * @r: the refcount to increment * * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller already has a * reference on the object. * * Will WARN if the refcount is 0, as this represents a possible use-after-free * condition. */ static inline void refcount_inc(refcount_t *r) { __refcount_inc(r, NULL); } static inline __must_check __signed_wrap bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp) { int old = atomic_fetch_sub_release(i, &r->refs); if (oldp) *oldp = old; if (old > 0 && old == i) { smp_acquire__after_ctrl_dep(); return true; } if (unlikely(old <= 0 || old - i < 0)) refcount_warn_saturate(r, REFCOUNT_SUB_UAF); return false; } /** * refcount_sub_and_test - subtract from a refcount and test if it is 0 * @i: amount to subtract from the refcount * @r: the refcount * * Similar to atomic_dec_and_test(), but it will WARN, return false and * ultimately leak on underflow and will fail to decrement when saturated * at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides an acquire ordering on success such that free() * must come after. * * Use of this function is not recommended for the normal reference counting * use case in which references are taken and released one at a time. In these * cases, refcount_dec(), or one of its variants, should instead be used to * decrement a reference count. * * Return: true if the resulting refcount is 0, false otherwise */ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) { return __refcount_sub_and_test(i, r, NULL); } static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp) { return __refcount_sub_and_test(1, r, oldp); } /** * refcount_dec_and_test - decrement a refcount and test if it is 0 * @r: the refcount * * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to * decrement when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before, and provides an acquire ordering on success such that free() * must come after. * * Return: true if the resulting refcount is 0, false otherwise */ static inline __must_check bool refcount_dec_and_test(refcount_t *r) { return __refcount_dec_and_test(r, NULL); } static inline void __refcount_dec(refcount_t *r, int *oldp) { int old = atomic_fetch_sub_release(1, &r->refs); if (oldp) *oldp = old; if (unlikely(old <= 1)) refcount_warn_saturate(r, REFCOUNT_DEC_LEAK); } /** * refcount_dec - decrement a refcount * @r: the refcount * * Similar to atomic_dec(), it will WARN on underflow and fail to decrement * when saturated at REFCOUNT_SATURATED. * * Provides release memory ordering, such that prior loads and stores are done * before. 
*/ static inline void refcount_dec(refcount_t *r) { __refcount_dec(r, NULL); } extern __must_check bool refcount_dec_if_one(refcount_t *r); extern __must_check bool refcount_dec_not_one(refcount_t *r); extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock); extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock); extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, unsigned long *flags) __cond_acquires(lock); #endif /* _LINUX_REFCOUNT_H */
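To make the API above concrete, here is a minimal usage sketch for a hypothetical reference-counted object ("struct foo" and the helpers are made up for illustration): the first reference is taken at allocation time, lockless lookups use refcount_inc_not_zero() so they fail cleanly once a concurrent put has dropped the count to zero, and the object is freed only on the 1->0 transition reported by refcount_dec_and_test().

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refs;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refs, 1);	/* caller owns the first reference */
	return f;
}

static struct foo *foo_get(struct foo *f)
{
	/* Fails once a concurrent foo_put() has dropped refs to 0. */
	if (!f || !refcount_inc_not_zero(&f->refs))
		return NULL;
	return f;
}

static void foo_put(struct foo *f)
{
	/* Release ordering before the decrement, acquire on the 1->0 hit. */
	if (f && refcount_dec_and_test(&f->refs))
		kfree(f);
}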
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/usb.h> #include <linux/usb/hcd.h> #include "usb.h" static int uas_is_interface(struct usb_host_interface *intf) { return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE && intf->desc.bInterfaceSubClass == USB_SC_SCSI && intf->desc.bInterfaceProtocol == USB_PR_UAS); } static struct usb_host_interface *uas_find_uas_alt_setting( struct usb_interface *intf) { int i; for (i = 0; i < intf->num_altsetting; i++) { struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) return alt; } return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, struct usb_host_endpoint *eps[]) { struct usb_host_endpoint *endpoint = alt->endpoint; unsigned i, n_endpoints = alt->desc.bNumEndpoints; for (i = 0; i < n_endpoints; i++) { unsigned char *extra = endpoint[i].extra; int len = endpoint[i].extralen; while (len >= 3) { if (extra[1] == USB_DT_PIPE_USAGE) { unsigned pipe_id = extra[2]; if (pipe_id > 0 && pipe_id < 5) eps[pipe_id - 1] = &endpoint[i]; break; } len -= extra[0]; extra += extra[0]; } } if (!eps[0] || !eps[1] || !eps[2] || !eps[3]) return -ENODEV; return 0; } static int uas_use_uas_driver(struct usb_interface *intf, const struct usb_device_id *id, u64 *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); u64 flags = id->driver_info; struct usb_host_interface *alt; int r; alt = uas_find_uas_alt_setting(intf); if (!alt) return 0; r = uas_find_endpoints(alt, eps); if (r < 0) return 0; /* * ASMedia has a number of usb3 to sata bridge chips, at the time of * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the * entire line, so the device-id is useless to determine if we're * dealing with an ASM1051 (which we want to avoid). * * The ASM1153 can be identified by config.MaxPower == 0, * whereas the ASM105x models have config.MaxPower == 36. * * Differentiating between the ASM1053 and ASM1051 is trickier, when * connected over USB-3 we can look at the number of streams supported, * ASM1051 supports 32 streams, whereas early ASM1053 versions support * 16 streams, newer ASM1053-s also support 32 streams, but have a * different prod-id.
* * (*) ASM1051 chips do work with UAS with some disks (with the * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { if (udev->actconfig->desc.bMaxPower == 0) { /* ASM1153, do nothing */ } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } else { /* ASM1053, these have issues with large transfers */ flags |= US_FL_MAX_SECTORS_240; } } /* All Seagate disk enclosures have broken ATA pass-through support */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) flags |= US_FL_NO_ATA_1X; /* * RTL9210-based enclosure from HIKSEMI, MD202 reportedly has issues * with UAS. This isn't distinguishable with just idVendor and * idProduct, use manufacturer and product too. * * Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda && le16_to_cpu(udev->descriptor.idProduct) == 0x9210 && (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) && (udev->product && !strcmp(udev->product, "MD202"))) flags |= US_FL_IGNORE_UAS; usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { dev_warn(&udev->dev, "UAS is ignored for this device, using usb-storage instead\n"); return 0; } if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", hcd->driver->description); dev_warn(&udev->dev, "required by the UAS driver. Please try another USB controller if you wish to use UAS.\n"); return 0; } if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams) { dev_warn(&udev->dev, "USB controller %s does not support streams, which are required by the UAS driver.\n", hcd_to_bus(hcd)->bus_name); dev_warn(&udev->dev, "Please try another USB controller if you wish to use UAS.\n"); return 0; } if (flags_ret) *flags_ret = flags; return 1; }
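The endpoint-matching logic in uas_find_endpoints() is just a walk over each endpoint's leftover descriptor bytes: every descriptor starts with its own length and type, and a Pipe Usage descriptor carries the pipe id (1..4 for command, status, data-in and data-out) in its third byte. A standalone sketch of that walk follows, with made-up sample bytes; the descriptor-type value is assumed to match the kernel's USB_DT_PIPE_USAGE constant, and an extra guard against a zero-length descriptor is added for hardening.

#include <stdio.h>

#define DT_PIPE_USAGE 0x24	/* assumed to match USB_DT_PIPE_USAGE */

static int pipe_id_from_extra(const unsigned char *extra, int len)
{
	while (len >= 3) {
		if (extra[1] == DT_PIPE_USAGE)
			return extra[2];
		if (extra[0] == 0)	/* malformed descriptor, stop */
			break;
		len -= extra[0];	/* skip to the next descriptor */
		extra += extra[0];
	}
	return -1;			/* no Pipe Usage descriptor found */
}

int main(void)
{
	/* bLength = 4, bDescriptorType = 0x24, pipe id = 3 (data-in) */
	const unsigned char extra[] = { 0x04, 0x24, 0x03, 0x00 };

	printf("pipe id: %d\n", pipe_id_from_extra(extra, (int)sizeof(extra)));
	return 0;
}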
// SPDX-License-Identifier: GPL-2.0-or-later /* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 * Phillip Lougher <phillip@squashfs.org.uk> * * inode.c */ /* * This file implements code to create and read inodes from disk. * * Inodes in Squashfs are identified by a 48-bit inode which encodes the * location of the compressed metadata block containing the inode, and the byte * offset into that block where the inode is placed (<block, offset>). * * To maximise compression there are different inodes for each file type * (regular file, directory, device, etc.), the inode contents and length * varying with the type. * * To further maximise compression, two types of regular file inode and * directory inode are defined: inodes optimised for frequently occurring * regular files and directories, and extended types where extra * information has to be stored. */ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/xattr.h> #include <linux/pagemap.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs_fs_i.h" #include "squashfs.h" #include "xattr.h" /* * Initialise VFS inode with the base inode information common to all * Squashfs inode types. Sqsh_ino contains the unswapped base inode * off disk.
*/ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, struct squashfs_base_inode *sqsh_ino) { uid_t i_uid; gid_t i_gid; int err; inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); if (inode->i_ino == 0) return -EINVAL; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid); if (err) return err; err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->guid), &i_gid); if (err) return err; i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0); inode_set_atime(inode, inode_get_mtime_sec(inode), 0); inode_set_ctime(inode, inode_get_mtime_sec(inode), 0); inode->i_mode = le16_to_cpu(sqsh_ino->mode); inode->i_size = 0; return err; } struct inode *squashfs_iget(struct super_block *sb, long long ino, unsigned int ino_number) { struct inode *inode = iget_locked(sb, ino_number); int err; TRACE("Entered squashfs_iget\n"); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; err = squashfs_read_inode(inode, ino); if (err) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } /* * Initialise VFS inode by reading inode from inode table (compressed * metadata). The format and amount of data read depends on type. */ int squashfs_read_inode(struct inode *inode, long long ino) { struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; u64 block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; int err, type, offset = SQUASHFS_INODE_OFFSET(ino); union squashfs_inode squashfs_ino; struct squashfs_base_inode *sqshb_ino = &squashfs_ino.base; int xattr_id = SQUASHFS_INVALID_XATTR; TRACE("Entered squashfs_read_inode\n"); /* * Read inode base common to all inode types. */ err = squashfs_read_metadata(sb, sqshb_ino, &block, &offset, sizeof(*sqshb_ino)); if (err < 0) goto failed_read; err = squashfs_new_inode(sb, inode, sqshb_ino); if (err) goto failed_read; block = SQUASHFS_INODE_BLK(ino) + msblk->inode_table; offset = SQUASHFS_INODE_OFFSET(ino); type = le16_to_cpu(sqshb_ino->inode_type); switch (type) { case SQUASHFS_REG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_reg_inode *sqsh_ino = &squashfs_ino.reg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } set_nlink(inode, 1); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = ((inode->i_size - 1) >> 9) + 1; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_LREG_TYPE: { unsigned int frag_offset, frag; int frag_size; u64 frag_blk; struct squashfs_lreg_inode *sqsh_ino = &squashfs_ino.lreg; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) 
goto failed_read; frag = le32_to_cpu(sqsh_ino->fragment); if (frag != SQUASHFS_INVALID_FRAG) { frag_offset = le32_to_cpu(sqsh_ino->offset); frag_size = squashfs_frag_lookup(sb, frag, &frag_blk); if (frag_size < 0) { err = frag_size; goto failed_read; } } else { frag_blk = SQUASHFS_INVALID_BLK; frag_size = 0; frag_offset = 0; } xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le64_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_inode_ops; inode->i_fop = &generic_ro_fops; inode->i_mode |= S_IFREG; inode->i_blocks = (inode->i_size - le64_to_cpu(sqsh_ino->sparse) + 511) >> 9; squashfs_i(inode)->fragment_block = frag_blk; squashfs_i(inode)->fragment_size = frag_size; squashfs_i(inode)->fragment_offset = frag_offset; squashfs_i(inode)->start = le64_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->block_list_start = block; squashfs_i(inode)->offset = offset; inode->i_data.a_ops = &squashfs_aops; TRACE("File inode %x:%x, start_block %llx, block_list_start " "%llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, block, offset); break; } case SQUASHFS_DIR_TYPE: { struct squashfs_dir_inode *sqsh_ino = &squashfs_ino.dir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le16_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_cnt = 0; squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Directory inode %x:%x, start_block %llx, offset %x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_LDIR_TYPE: { struct squashfs_ldir_inode *sqsh_ino = &squashfs_ino.ldir; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(sqsh_ino->xattr); set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_size = le32_to_cpu(sqsh_ino->file_size); inode->i_op = &squashfs_dir_inode_ops; inode->i_fop = &squashfs_dir_ops; inode->i_mode |= S_IFDIR; squashfs_i(inode)->start = le32_to_cpu(sqsh_ino->start_block); squashfs_i(inode)->offset = le16_to_cpu(sqsh_ino->offset); squashfs_i(inode)->dir_idx_start = block; squashfs_i(inode)->dir_idx_offset = offset; squashfs_i(inode)->dir_idx_cnt = le16_to_cpu(sqsh_ino->i_count); squashfs_i(inode)->parent = le32_to_cpu(sqsh_ino->parent_inode); TRACE("Long directory inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, squashfs_i(inode)->start, le16_to_cpu(sqsh_ino->offset)); break; } case SQUASHFS_SYMLINK_TYPE: case SQUASHFS_LSYMLINK_TYPE: { struct squashfs_symlink_inode *sqsh_ino = &squashfs_ino.symlink; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; inode->i_size = le32_to_cpu(sqsh_ino->symlink_size); if (inode->i_size > PAGE_SIZE) { ERROR("Corrupted symlink\n"); return -EINVAL; } set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); inode->i_op = &squashfs_symlink_inode_ops; inode_nohighmem(inode); inode->i_data.a_ops = &squashfs_symlink_aops; inode->i_mode |= S_IFLNK; squashfs_i(inode)->start = block; squashfs_i(inode)->offset = offset; if (type == SQUASHFS_LSYMLINK_TYPE) { __le32 xattr; err = 
squashfs_read_metadata(sb, NULL, &block, &offset, inode->i_size); if (err < 0) goto failed_read; err = squashfs_read_metadata(sb, &xattr, &block, &offset, sizeof(xattr)); if (err < 0) goto failed_read; xattr_id = le32_to_cpu(xattr); } TRACE("Symbolic link inode %x:%x, start_block %llx, offset " "%x\n", SQUASHFS_INODE_BLK(ino), offset, block, offset); break; } case SQUASHFS_BLKDEV_TYPE: case SQUASHFS_CHRDEV_TYPE: { struct squashfs_dev_inode *sqsh_ino = &squashfs_ino.dev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_CHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_LBLKDEV_TYPE: case SQUASHFS_LCHRDEV_TYPE: { struct squashfs_ldev_inode *sqsh_ino = &squashfs_ino.ldev; unsigned int rdev; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LCHRDEV_TYPE) inode->i_mode |= S_IFCHR; else inode->i_mode |= S_IFBLK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); rdev = le32_to_cpu(sqsh_ino->rdev); init_special_inode(inode, inode->i_mode, new_decode_dev(rdev)); TRACE("Device inode %x:%x, rdev %x\n", SQUASHFS_INODE_BLK(ino), offset, rdev); break; } case SQUASHFS_FIFO_TYPE: case SQUASHFS_SOCKET_TYPE: { struct squashfs_ipc_inode *sqsh_ino = &squashfs_ino.ipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_FIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } case SQUASHFS_LFIFO_TYPE: case SQUASHFS_LSOCKET_TYPE: { struct squashfs_lipc_inode *sqsh_ino = &squashfs_ino.lipc; err = squashfs_read_metadata(sb, sqsh_ino, &block, &offset, sizeof(*sqsh_ino)); if (err < 0) goto failed_read; if (type == SQUASHFS_LFIFO_TYPE) inode->i_mode |= S_IFIFO; else inode->i_mode |= S_IFSOCK; xattr_id = le32_to_cpu(sqsh_ino->xattr); inode->i_op = &squashfs_inode_ops; set_nlink(inode, le32_to_cpu(sqsh_ino->nlink)); init_special_inode(inode, inode->i_mode, 0); break; } default: ERROR("Unknown inode type %d in squashfs_iget!\n", type); return -EINVAL; } if (xattr_id != SQUASHFS_INVALID_XATTR && msblk->xattr_id_table) { err = squashfs_xattr_lookup(sb, xattr_id, &squashfs_i(inode)->xattr_count, &squashfs_i(inode)->xattr_size, &squashfs_i(inode)->xattr); if (err < 0) goto failed_read; inode->i_blocks += ((squashfs_i(inode)->xattr_size - 1) >> 9) + 1; } else squashfs_i(inode)->xattr_count = 0; return 0; failed_read: ERROR("Unable to read inode 0x%llx\n", ino); return err; } const struct inode_operations squashfs_inode_ops = { .listxattr = squashfs_listxattr };
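As the header comment at the top of this file says, the long long "ino" is really a packed <block, offset> pair. The sketch below shows the decomposition used throughout squashfs_read_inode(); the 16-bit split is assumed to match the SQUASHFS_INODE_BLK()/SQUASHFS_INODE_OFFSET() helpers, which are defined in squashfs_fs.h and are not part of this excerpt.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* sample reference: metadata block at byte 0x123456, offset 0x1f0 */
	uint64_t ino = (0x123456ULL << 16) | 0x01f0;

	/* upper bits: position of the compressed metadata block */
	printf("block  0x%llx\n", (unsigned long long)(ino >> 16));
	/* low 16 bits: inode offset within the uncompressed block */
	printf("offset 0x%llx\n", (unsigned long long)(ino & 0xffff));
	return 0;
}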
/* SPDX-License-Identifier: GPL-2.0 */ /* * thermal.h ($Revision: 0 $) * * Copyright (C) 2008 Intel Corp * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com> * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com> */ #ifndef __THERMAL_H__ #define __THERMAL_H__ #include <linux/of.h> #include <linux/idr.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <uapi/linux/thermal.h> /* invalid cooling state */ #define THERMAL_CSTATE_INVALID -1UL /* No upper/lower limit requirement */ #define THERMAL_NO_LIMIT ((u32)~0) /* Default weight of a bound cooling device */ #define THERMAL_WEIGHT_DEFAULT 0 /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; struct thermal_debugfs; struct thermal_attr; enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is raising */ THERMAL_TREND_DROPPING, /* temperature is dropping */ }; /* Thermal notification reason */ enum thermal_notify_event { THERMAL_EVENT_UNSPECIFIED, /* Unspecified event */ THERMAL_EVENT_TEMP_SAMPLE, /* New Temperature sample */ THERMAL_TRIP_VIOLATED, /* TRIP Point violation */ THERMAL_TRIP_CHANGED, /* TRIP Point temperature changed */ THERMAL_DEVICE_DOWN, /* Thermal device is down */ THERMAL_DEVICE_UP, /* Thermal device is up after a down event */ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED, /* power capability changed */ THERMAL_TABLE_CHANGED, /* Thermal table(s) changed */ THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */ THERMAL_TZ_BIND_CDEV, /* Cooling dev is bind to the thermal zone */ THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */ THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */ THERMAL_TZ_ADD_THRESHOLD, /* Threshold added */ THERMAL_TZ_DEL_THRESHOLD, /* Threshold deleted */ THERMAL_TZ_FLUSH_THRESHOLDS, /* All thresholds deleted */ }; /** * struct thermal_trip - representation of a point in temperature domain * @temperature: temperature value in miliCelsius * @hysteresis: relative
hysteresis in miliCelsius * @type: trip point type * @priv: pointer to driver data associated with this trip * @flags: flags representing binary properties of the trip */ struct thermal_trip { int temperature; int hysteresis; enum thermal_trip_type type; u8 flags; void *priv; }; #define THERMAL_TRIP_FLAG_RW_TEMP BIT(0) #define THERMAL_TRIP_FLAG_RW_HYST BIT(1) #define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \ THERMAL_TRIP_FLAG_RW_HYST) #define THERMAL_TRIP_PRIV_TO_INT(_val_) (uintptr_t)(_val_) #define THERMAL_INT_TO_TRIP_PRIV(_val_) (void *)(uintptr_t)(_val_) struct thermal_zone_device; struct cooling_spec { unsigned long upper; /* Highest cooling state */ unsigned long lower; /* Lowest cooling state */ unsigned int weight; /* Cooling device weight */ }; struct thermal_zone_device_ops { bool (*should_bind) (struct thermal_zone_device *, const struct thermal_trip *, struct thermal_cooling_device *, struct cooling_spec *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*set_trip_temp) (struct thermal_zone_device *, const struct thermal_trip *, int); int (*get_crit_temp) (struct thermal_zone_device *, int *); int (*set_emul_temp) (struct thermal_zone_device *, int); int (*get_trend) (struct thermal_zone_device *, const struct thermal_trip *, enum thermal_trend *); void (*hot)(struct thermal_zone_device *); void (*critical)(struct thermal_zone_device *); }; struct thermal_cooling_device_ops { int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); int (*get_requested_power)(struct thermal_cooling_device *, u32 *); int (*state2power)(struct thermal_cooling_device *, unsigned long, u32 *); int (*power2state)(struct thermal_cooling_device *, u32, unsigned long *); }; struct thermal_cooling_device { int id; const char *type; unsigned long max_state; struct device device; struct device_node *np; void *devdata; void *stats; const struct thermal_cooling_device_ops *ops; bool updated; /* true if the cooling device does not need update */ struct mutex lock; /* protect thermal_instances list */ struct list_head thermal_instances; struct list_head node; #ifdef CONFIG_THERMAL_DEBUGFS struct thermal_debugfs *debugfs; #endif }; DEFINE_GUARD(cooling_dev, struct thermal_cooling_device *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock)) /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { const char *governor_name; /* * a boolean to indicate if the thermal to hwmon sysfs interface * is required. when no_hwmon == false, a hwmon sysfs interface * will be created. when no_hwmon == true, nothing will be done */ bool no_hwmon; /* * Sustainable power (heat) that this thermal zone can dissipate in * mW */ u32 sustainable_power; /* * Proportional parameter of the PID controller when * overshooting (i.e., when temperature is below the target) */ s32 k_po; /* * Proportional parameter of the PID controller when * undershooting */ s32 k_pu; /* Integral parameter of the PID controller */ s32 k_i; /* Derivative parameter of the PID controller */ s32 k_d; /* threshold below which the error is no longer accumulated */ s32 integral_cutoff; /* * @slope: slope of a linear temperature adjustment curve. * Used by thermal zone drivers. 
*/ int slope; /* * @offset: offset of a linear temperature adjustment curve. * Used by thermal zone drivers (default 0). */ int offset; }; /* Function declarations */ #ifdef CONFIG_THERMAL_OF struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data, const struct thermal_zone_device_ops *ops); void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *devm_thermal_of_zone_register(struct device *dev, int id, void *data, const struct thermal_zone_device_ops *ops) { return ERR_PTR(-ENOTSUPP); } static inline void devm_thermal_of_zone_unregister(struct device *dev, struct thermal_zone_device *tz) { } #endif int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); int thermal_zone_for_each_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, struct thermal_trip *trip, int temp); int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp); #ifdef CONFIG_THERMAL struct thermal_zone_device *thermal_zone_device_register_with_trips( const char *type, const struct thermal_trip *trips, int num_trips, void *devdata, const struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, unsigned int passive_delay, unsigned int polling_delay); struct thermal_zone_device *thermal_tripless_zone_device_register( const char *type, void *devdata, const struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp); void thermal_zone_device_unregister(struct thermal_zone_device *tz); void *thermal_zone_device_priv(struct thermal_zone_device *tzd); const char *thermal_zone_device_type(struct thermal_zone_device *tzd); int thermal_zone_device_id(struct thermal_zone_device *tzd); struct device *thermal_zone_device(struct thermal_zone_device *tzd); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, const char *, void *, const struct thermal_cooling_device_ops *); struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, const char *type, void *devdata, const struct thermal_cooling_device_ops *ops); void thermal_cooling_device_update(struct thermal_cooling_device *); void thermal_cooling_device_unregister(struct thermal_cooling_device *); struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name); int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); bool thermal_trip_is_bound_to_cdev(struct thermal_zone_device *tz, const struct thermal_trip *trip, struct thermal_cooling_device *cdev); int thermal_zone_device_enable(struct thermal_zone_device *tz); int thermal_zone_device_disable(struct thermal_zone_device *tz); void thermal_zone_device_critical(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register_with_trips( const char *type, const struct thermal_trip *trips, int num_trips, void *devdata, const struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp, int 
passive_delay, int polling_delay) { return ERR_PTR(-ENODEV); } static inline struct thermal_zone_device *thermal_tripless_zone_device_register( const char *type, void *devdata, struct thermal_zone_device_ops *ops, const struct thermal_zone_params *tzp) { return ERR_PTR(-ENODEV); } static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz) { } static inline struct thermal_cooling_device * thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, struct device_node *np, const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline void thermal_cooling_device_unregister( struct thermal_cooling_device *cdev) { } static inline struct thermal_zone_device *thermal_zone_get_zone_by_name( const char *name) { return ERR_PTR(-ENODEV); } static inline int thermal_zone_get_temp( struct thermal_zone_device *tz, int *temp) { return -ENODEV; } static inline int thermal_zone_get_slope( struct thermal_zone_device *tz) { return -ENODEV; } static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } static inline void *thermal_zone_device_priv(struct thermal_zone_device *tz) { return NULL; } static inline const char *thermal_zone_device_type(struct thermal_zone_device *tzd) { return NULL; } static inline int thermal_zone_device_id(struct thermal_zone_device *tzd) { return -ENODEV; } static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) { return -ENODEV; } static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) { return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */
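A minimal sketch of a driver-side cooling device backing the thermal_cooling_device_ops table declared above: a hypothetical four-state fan (the "my_fan_*" names are made up for illustration) that implements only the three state callbacks and leaves the optional power-model hooks unset.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/thermal.h>

struct my_fan {
	unsigned long cur_state;	/* 0 = off ... 3 = full speed */
};

static int my_fan_get_max_state(struct thermal_cooling_device *cdev,
				unsigned long *state)
{
	*state = 3;
	return 0;
}

static int my_fan_get_cur_state(struct thermal_cooling_device *cdev,
				unsigned long *state)
{
	struct my_fan *fan = cdev->devdata;

	*state = fan->cur_state;
	return 0;
}

static int my_fan_set_cur_state(struct thermal_cooling_device *cdev,
				unsigned long state)
{
	struct my_fan *fan = cdev->devdata;

	if (state > 3)
		return -EINVAL;
	fan->cur_state = state;		/* program the hardware here */
	return 0;
}

static const struct thermal_cooling_device_ops my_fan_ops = {
	.get_max_state = my_fan_get_max_state,
	.get_cur_state = my_fan_get_cur_state,
	.set_cur_state = my_fan_set_cur_state,
};

Registration would then go through thermal_cooling_device_register("my-fan", &fan_instance, &my_fan_ops), whose prototype appears above; the thermal core invokes set_cur_state() as governors bind the device to trip points.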
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/kernel.h> #include <linux/slab.h> #include <net/act_api.h> #include <net/flow_offload.h> #include <linux/rtnetlink.h> #include <linux/mutex.h> #include <linux/rhashtable.h> struct flow_rule *flow_rule_alloc(unsigned int num_actions) { struct flow_rule *rule; int i; rule = kzalloc(struct_size(rule, action.entries, num_actions), GFP_KERNEL); if (!rule) return NULL; rule->action.num_entries = num_actions; /* Pre-fill each action hw_stats with DONT_CARE. * Caller can override this if it wants stats for a given action.
*/ for (i = 0; i < num_actions; i++) rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return rule; } EXPORT_SYMBOL(flow_rule_alloc); struct flow_offload_action *offload_action_alloc(unsigned int num_actions) { struct flow_offload_action *fl_action; int i; fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions), GFP_KERNEL); if (!fl_action) return NULL; fl_action->action.num_entries = num_actions; /* Pre-fill each action hw_stats with DONT_CARE. * Caller can override this if it wants stats for a given action. */ for (i = 0; i < num_actions; i++) fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; return fl_action; } #define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ const struct flow_match *__m = &(__rule)->match; \ struct flow_dissector *__d = (__m)->dissector; \ \ (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask); \ void flow_rule_match_meta(const struct flow_rule *rule, struct flow_match_meta *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out); } EXPORT_SYMBOL(flow_rule_match_meta); void flow_rule_match_basic(const struct flow_rule *rule, struct flow_match_basic *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); } EXPORT_SYMBOL(flow_rule_match_basic); void flow_rule_match_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_control); void flow_rule_match_eth_addrs(const struct flow_rule *rule, struct flow_match_eth_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_eth_addrs); void flow_rule_match_vlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); } EXPORT_SYMBOL(flow_rule_match_vlan); void flow_rule_match_cvlan(const struct flow_rule *rule, struct flow_match_vlan *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out); } EXPORT_SYMBOL(flow_rule_match_cvlan); void flow_rule_match_arp(const struct flow_rule *rule, struct flow_match_arp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ARP, out); } EXPORT_SYMBOL(flow_rule_match_arp); void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv4_addrs); void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_ipv6_addrs); void flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); } EXPORT_SYMBOL(flow_rule_match_ip); void flow_rule_match_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_ports); void flow_rule_match_ports_range(const struct flow_rule *rule, struct flow_match_ports_range *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out); } EXPORT_SYMBOL(flow_rule_match_ports_range); void flow_rule_match_tcp(const struct flow_rule *rule, struct flow_match_tcp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out); } EXPORT_SYMBOL(flow_rule_match_tcp); void flow_rule_match_ipsec(const struct flow_rule *rule, struct flow_match_ipsec *out) 
{ FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPSEC, out); } EXPORT_SYMBOL(flow_rule_match_ipsec); void flow_rule_match_icmp(const struct flow_rule *rule, struct flow_match_icmp *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out); } EXPORT_SYMBOL(flow_rule_match_icmp); void flow_rule_match_mpls(const struct flow_rule *rule, struct flow_match_mpls *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out); } EXPORT_SYMBOL(flow_rule_match_mpls); void flow_rule_match_enc_control(const struct flow_rule *rule, struct flow_match_control *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); } EXPORT_SYMBOL(flow_rule_match_enc_control); void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, struct flow_match_ipv4_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs); void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, struct flow_match_ipv6_addrs *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs); void flow_rule_match_enc_ip(const struct flow_rule *rule, struct flow_match_ip *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); } EXPORT_SYMBOL(flow_rule_match_enc_ip); void flow_rule_match_enc_ports(const struct flow_rule *rule, struct flow_match_ports *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_ports); void flow_rule_match_enc_keyid(const struct flow_rule *rule, struct flow_match_enc_keyid *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); } EXPORT_SYMBOL(flow_rule_match_enc_keyid); void flow_rule_match_enc_opts(const struct flow_rule *rule, struct flow_match_enc_opts *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out); } EXPORT_SYMBOL(flow_rule_match_enc_opts); struct flow_action_cookie *flow_action_cookie_create(void *data, unsigned int len, gfp_t gfp) { struct flow_action_cookie *cookie; cookie = kmalloc(sizeof(*cookie) + len, gfp); if (!cookie) return NULL; cookie->cookie_len = len; memcpy(cookie->cookie, data, len); return cookie; } EXPORT_SYMBOL(flow_action_cookie_create); void flow_action_cookie_destroy(struct flow_action_cookie *cookie) { kfree(cookie); } EXPORT_SYMBOL(flow_action_cookie_destroy); void flow_rule_match_ct(const struct flow_rule *rule, struct flow_match_ct *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out); } EXPORT_SYMBOL(flow_rule_match_ct); void flow_rule_match_pppoe(const struct flow_rule *rule, struct flow_match_pppoe *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out); } EXPORT_SYMBOL(flow_rule_match_pppoe); void flow_rule_match_l2tpv3(const struct flow_rule *rule, struct flow_match_l2tpv3 *out) { FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out); } EXPORT_SYMBOL(flow_rule_match_l2tpv3); struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_block_cb *block_cb; block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL); if (!block_cb) return ERR_PTR(-ENOMEM); block_cb->cb = cb; block_cb->cb_ident = cb_ident; block_cb->cb_priv = cb_priv; block_cb->release = release; return block_cb; } EXPORT_SYMBOL(flow_block_cb_alloc); void flow_block_cb_free(struct flow_block_cb *block_cb) { if (block_cb->release) block_cb->release(block_cb->cb_priv); kfree(block_cb); } EXPORT_SYMBOL(flow_block_cb_free); struct flow_block_cb *flow_block_cb_lookup(struct 
flow_block *block, flow_setup_cb_t *cb, void *cb_ident) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, &block->cb_list, list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return block_cb; } return NULL; } EXPORT_SYMBOL(flow_block_cb_lookup); void *flow_block_cb_priv(struct flow_block_cb *block_cb) { return block_cb->cb_priv; } EXPORT_SYMBOL(flow_block_cb_priv); void flow_block_cb_incref(struct flow_block_cb *block_cb) { block_cb->refcnt++; } EXPORT_SYMBOL(flow_block_cb_incref); unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb) { return --block_cb->refcnt; } EXPORT_SYMBOL(flow_block_cb_decref); bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident, struct list_head *driver_block_list) { struct flow_block_cb *block_cb; list_for_each_entry(block_cb, driver_block_list, driver_list) { if (block_cb->cb == cb && block_cb->cb_ident == cb_ident) return true; } return false; } EXPORT_SYMBOL(flow_block_cb_is_busy); int flow_block_cb_setup_simple(struct flow_block_offload *f, struct list_head *driver_block_list, flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, bool ingress_only) { struct flow_block_cb *block_cb; if (ingress_only && f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; f->driver_block_list = driver_block_list; switch (f->command) { case FLOW_BLOCK_BIND: if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list)) return -EBUSY; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); flow_block_cb_add(block_cb, f); list_add_tail(&block_cb->driver_list, driver_block_list); return 0; case FLOW_BLOCK_UNBIND: block_cb = flow_block_cb_lookup(f->block, cb, cb_ident); if (!block_cb) return -ENOENT; flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); return 0; default: return -EOPNOTSUPP; } } EXPORT_SYMBOL(flow_block_cb_setup_simple); static DEFINE_MUTEX(flow_indr_block_lock); static LIST_HEAD(flow_block_indr_list); static LIST_HEAD(flow_block_indr_dev_list); static LIST_HEAD(flow_indir_dev_list); struct flow_indr_dev { struct list_head list; flow_indr_block_bind_cb_t *cb; void *cb_priv; refcount_t refcnt; }; static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL); if (!indr_dev) return NULL; indr_dev->cb = cb; indr_dev->cb_priv = cb_priv; refcount_set(&indr_dev->refcnt, 1); return indr_dev; } struct flow_indir_dev_info { void *data; struct net_device *dev; struct Qdisc *sch; enum tc_setup_type type; void (*cleanup)(struct flow_block_cb *block_cb); struct list_head list; enum flow_block_command command; enum flow_block_binder_type binder_type; struct list_head *cb_list; }; static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_block_offload bo; struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { memset(&bo, 0, sizeof(bo)); bo.command = cur->command; bo.binder_type = cur->binder_type; INIT_LIST_HEAD(&bo.cb_list); cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup); list_splice(&bo.cb_list, cur->cb_list); } } int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv) { struct flow_indr_dev *indr_dev; mutex_lock(&flow_indr_block_lock); list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) { if (indr_dev->cb == cb && indr_dev->cb_priv == cb_priv) { refcount_inc(&indr_dev->refcnt); 
mutex_unlock(&flow_indr_block_lock); return 0; } } indr_dev = flow_indr_dev_alloc(cb, cb_priv); if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return -ENOMEM; } list_add(&indr_dev->list, &flow_block_indr_dev_list); existing_qdiscs_register(cb, cb_priv); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, true); return 0; } EXPORT_SYMBOL(flow_indr_dev_register); static void __flow_block_indr_cleanup(void (*release)(void *cb_priv), void *cb_priv, struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) { if (this->release == release && this->indr.cb_priv == cb_priv) list_move(&this->indr.list, cleanup_list); } } static void flow_block_indr_notify(struct list_head *cleanup_list) { struct flow_block_cb *this, *next; list_for_each_entry_safe(this, next, cleanup_list, indr.list) { list_del(&this->indr.list); this->indr.cleanup(this); } } void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, void (*release)(void *cb_priv)) { struct flow_indr_dev *this, *next, *indr_dev = NULL; LIST_HEAD(cleanup_list); mutex_lock(&flow_indr_block_lock); list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) { if (this->cb == cb && this->cb_priv == cb_priv && refcount_dec_and_test(&this->refcnt)) { indr_dev = this; list_del(&indr_dev->list); break; } } if (!indr_dev) { mutex_unlock(&flow_indr_block_lock); return; } __flow_block_indr_cleanup(release, cb_priv, &cleanup_list); mutex_unlock(&flow_indr_block_lock); tcf_action_reoffload_cb(cb, cb_priv, false); flow_block_indr_notify(&cleanup_list); kfree(indr_dev); } EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { flow_block->indr.binder_type = bo->binder_type; flow_block->indr.data = data; flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; flow_block->indr.sch = sch; flow_block->indr.cleanup = cleanup; } struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_block_cb *block_cb; block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release); if (IS_ERR(block_cb)) goto out; flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); list_add(&block_cb->indr.list, &flow_block_indr_list); out: return block_cb; } EXPORT_SYMBOL(flow_indr_block_cb_alloc); static struct flow_indir_dev_info *find_indir_dev(void *data) { struct flow_indir_dev_info *cur; list_for_each_entry(cur, &flow_indir_dev_list, list) { if (cur->data == data) return cur; } return NULL; } static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb), struct flow_block_offload *bo) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (info) return -EEXIST; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->data = data; info->dev = dev; info->sch = sch; info->type = type; info->cleanup = cleanup; info->command = bo->command; info->binder_type = bo->binder_type; info->cb_list = bo->cb_list_head; list_add(&info->list, &flow_indir_dev_list); return 0; } static int 
indir_dev_remove(void *data) { struct flow_indir_dev_info *info; info = find_indir_dev(data); if (!info) return -ENOENT; list_del(&info->list); kfree(info); return 0; } int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)) { struct flow_indr_dev *this; u32 count = 0; int err; mutex_lock(&flow_indr_block_lock); if (bo) { if (bo->command == FLOW_BLOCK_BIND) indir_dev_add(data, dev, sch, type, cleanup, bo); else if (bo->command == FLOW_BLOCK_UNBIND) indir_dev_remove(data); } list_for_each_entry(this, &flow_block_indr_dev_list, list) { err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); if (!err) count++; } mutex_unlock(&flow_indr_block_lock); return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); bool flow_indr_dev_exists(void) { return !list_empty(&flow_block_indr_dev_list); } EXPORT_SYMBOL(flow_indr_dev_exists);
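/*
 * Example (editor's illustration, not part of this file): the typical
 * driver-side consumption of the flow_rule_match_*() helpers above when
 * handling a classifier offload request. foo_parse_flower() and the
 * hardware-programming comments are assumptions; flow_rule_match_key()
 * and flow_cls_offload_flow_rule() are real helpers from
 * <net/flow_offload.h>.
 */
static int foo_parse_flower(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		/* this sketch only offloads IPv4 rules */
		if (match.mask->n_proto &&
		    match.key->n_proto != htons(ETH_P_IP))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		/* program match.key->src/dst, masked by match.mask, into HW */
	}

	return 0;
}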
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/module.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfcnfg.h> #include <net/caif/cfctrl.h> #include <net/caif/cfmuxl.h> #include <net/caif/cffrml.h> #include <net/caif/cfserl.h> #include <net/caif/cfsrvl.h> #include <net/caif/caif_dev.h> #define container_obj(layr) container_of(layr, struct cfcnfg, layer) /* Information about CAIF physical interfaces held by Config Module in order * to manage physical interfaces */ struct cfcnfg_phyinfo { struct list_head node; bool up; /* Pointer to the layer below the MUX (framing layer) */ struct cflayer *frm_layer; /* Pointer to the lowest actual physical layer */ struct cflayer *phy_layer; /* Unique identifier of the physical interface */ unsigned int id; /* Preference of the physical interface */ enum cfcnfg_phy_preference pref; /* Information about the physical device */
struct dev_info dev_info; /* Interface index */ int ifindex; /* Protocol head room added for CAIF link layer */ int head_room; /* Use Start of frame checksum */ bool use_fcs; }; struct cfcnfg { struct cflayer layer; struct cflayer *ctrl; struct cflayer *mux; struct list_head phys; struct mutex lock; }; static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, u8 phyid, struct cflayer *adapt_layer); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, struct cflayer *adapt_layer); static void cfctrl_resp_func(void); static void cfctrl_enum_resp(void); struct cfcnfg *cfcnfg_create(void) { struct cfcnfg *this; struct cfctrl_rsp *resp; might_sleep(); /* Initiate this layer */ this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); if (!this) return NULL; this->mux = cfmuxl_create(); if (!this->mux) goto out_of_mem; this->ctrl = cfctrl_create(); if (!this->ctrl) goto out_of_mem; /* Initiate response functions */ resp = cfctrl_get_respfuncs(this->ctrl); resp->enum_rsp = cfctrl_enum_resp; resp->linkerror_ind = cfctrl_resp_func; resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp; resp->sleep_rsp = cfctrl_resp_func; resp->wake_rsp = cfctrl_resp_func; resp->restart_rsp = cfctrl_resp_func; resp->radioset_rsp = cfctrl_resp_func; resp->linksetup_rsp = cfcnfg_linkup_rsp; resp->reject_rsp = cfcnfg_reject_rsp; INIT_LIST_HEAD(&this->phys); cfmuxl_set_uplayer(this->mux, this->ctrl, 0); layer_set_dn(this->ctrl, this->mux); layer_set_up(this->ctrl, this); mutex_init(&this->lock); return this; out_of_mem: synchronize_rcu(); kfree(this->mux); kfree(this->ctrl); kfree(this); return NULL; } void cfcnfg_remove(struct cfcnfg *cfg) { might_sleep(); if (cfg) { synchronize_rcu(); kfree(cfg->mux); cfctrl_remove(cfg->ctrl); kfree(cfg); } } static void cfctrl_resp_func(void) { } static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, u8 phyid) { struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->id == phyid) return phy; return NULL; } static void cfctrl_enum_resp(void) { } static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, enum cfcnfg_phy_preference phy_pref) { /* Try to match with specified preference */ struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) { if (phy->up && phy->pref == phy_pref && phy->frm_layer != NULL) return &phy->dev_info; } /* Otherwise just return something */ list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->up) return &phy->dev_info; return NULL; } static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) { struct cfcnfg_phyinfo *phy; list_for_each_entry_rcu(phy, &cnfg->phys, node) if (phy->ifindex == ifi && phy->up) return phy->id; return -ENODEV; } int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) { u8 channel_id; struct cfcnfg *cfg = get_cfcnfg(net); caif_assert(adap_layer != NULL); cfctrl_cancel_req(cfg->ctrl, adap_layer); channel_id = adap_layer->id; if (channel_id != 0) { struct cflayer *servl; servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); if (servl != NULL) layer_set_up(servl, NULL); } else pr_debug("nothing to disconnect\n"); /* Do RCU sync before initiating cleanup */ synchronize_rcu(); if (adap_layer->ctrlcmd != NULL) adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); return 0; } EXPORT_SYMBOL(caif_disconnect_client); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 
channel_id) { } static const int protohead[CFCTRL_SRV_MASK] = { [CFCTRL_SRV_VEI] = 4, [CFCTRL_SRV_DATAGRAM] = 7, [CFCTRL_SRV_UTIL] = 4, [CFCTRL_SRV_RFM] = 3, [CFCTRL_SRV_DBG] = 3, }; static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, struct caif_connect_request *s, struct cfctrl_link_param *l) { struct dev_info *dev_info; enum cfcnfg_phy_preference pref; int res; memset(l, 0, sizeof(*l)); /* In caif protocol low value is high priority */ l->priority = CAIF_PRIO_MAX - s->priority + 1; if (s->ifindex != 0) { res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); if (res < 0) return res; l->phyid = res; } else { switch (s->link_selector) { case CAIF_LINK_HIGH_BANDW: pref = CFPHYPREF_HIGH_BW; break; case CAIF_LINK_LOW_LATENCY: pref = CFPHYPREF_LOW_LAT; break; default: return -EINVAL; } dev_info = cfcnfg_get_phyid(cnfg, pref); if (dev_info == NULL) return -ENODEV; l->phyid = dev_info->id; } switch (s->protocol) { case CAIFPROTO_AT: l->linktype = CFCTRL_SRV_VEI; l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3; l->chtype = s->sockaddr.u.at.type & 0x3; break; case CAIFPROTO_DATAGRAM: l->linktype = CFCTRL_SRV_DATAGRAM; l->chtype = 0x00; l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; break; case CAIFPROTO_DATAGRAM_LOOP: l->linktype = CFCTRL_SRV_DATAGRAM; l->chtype = 0x03; l->endpoint = 0x00; l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; break; case CAIFPROTO_RFM: l->linktype = CFCTRL_SRV_RFM; l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; strscpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, sizeof(l->u.rfm.volume)); break; case CAIFPROTO_UTIL: l->linktype = CFCTRL_SRV_UTIL; l->endpoint = 0x00; l->chtype = 0x00; strscpy(l->u.utility.name, s->sockaddr.u.util.service, sizeof(l->u.utility.name)); caif_assert(sizeof(l->u.utility.name) > 10); l->u.utility.paramlen = s->param.size; if (l->u.utility.paramlen > sizeof(l->u.utility.params)) l->u.utility.paramlen = sizeof(l->u.utility.params); memcpy(l->u.utility.params, s->param.data, l->u.utility.paramlen); break; case CAIFPROTO_DEBUG: l->linktype = CFCTRL_SRV_DBG; l->endpoint = s->sockaddr.u.dbg.service; l->chtype = s->sockaddr.u.dbg.type; break; default: return -EINVAL; } return 0; } int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, struct cflayer *adap_layer, int *ifindex, int *proto_head, int *proto_tail) { struct cflayer *frml; struct cfcnfg_phyinfo *phy; int err; struct cfctrl_link_param param; struct cfcnfg *cfg = get_cfcnfg(net); rcu_read_lock(); err = caif_connect_req_to_link_param(cfg, conn_req, &param); if (err) goto unlock; phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid); if (!phy) { err = -ENODEV; goto unlock; } err = -EINVAL; if (adap_layer == NULL) { pr_err("adap_layer is zero\n"); goto unlock; } if (adap_layer->receive == NULL) { pr_err("adap_layer->receive is NULL\n"); goto unlock; } if (adap_layer->ctrlcmd == NULL) { pr_err("adap_layer->ctrlcmd == NULL\n"); goto unlock; } err = -ENODEV; frml = phy->frm_layer; if (frml == NULL) { pr_err("Specified PHY type does not exist!\n"); goto unlock; } caif_assert(param.phyid == phy->id); caif_assert(phy->frm_layer->id == param.phyid); caif_assert(phy->phy_layer->id == param.phyid); *ifindex = phy->ifindex; *proto_tail = 2; *proto_head = protohead[param.linktype] + phy->head_room; rcu_read_unlock(); /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ cfctrl_enum_req(cfg->ctrl, param.phyid); return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer); unlock: rcu_read_unlock(); return err; } EXPORT_SYMBOL(caif_connect_client); 
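/*
 * Example (editor's illustration, not part of this file): the shape of a
 * caif_connect_client() call as made by an adaptation layer such as the
 * CAIF socket code. Per the checks above, the adaptation layer must have
 * receive() and ctrlcmd() set before connecting. foo_* names are assumed,
 * with the callbacks defined elsewhere.
 */
static int foo_receive(struct cflayer *layr, struct cfpkt *pkt);	/* assumed */
static void foo_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd cmd,
			int phyid);					/* assumed */

static int foo_connect(struct net *net, struct caif_connect_request *req,
		       struct cflayer *adap)
{
	int ifindex, headroom, tailroom;

	adap->receive = foo_receive;
	adap->ctrlcmd = foo_ctrlcmd;

	/* On success, headroom/tailroom tell the caller how much protocol
	 * space to reserve around payloads for this link type. */
	return caif_connect_client(net, req, adap, &ifindex,
				   &headroom, &tailroom);
}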
static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, struct cflayer *adapt_layer) { if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) adapt_layer->ctrlcmd(adapt_layer, CAIF_CTRLCMD_INIT_FAIL_RSP, 0); } static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, u8 phyid, struct cflayer *adapt_layer) { struct cfcnfg *cnfg = container_obj(layer); struct cflayer *servicel = NULL; struct cfcnfg_phyinfo *phyinfo; struct net_device *netdev; if (channel_id == 0) { pr_warn("received channel_id zero\n"); if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) adapt_layer->ctrlcmd(adapt_layer, CAIF_CTRLCMD_INIT_FAIL_RSP, 0); return; } rcu_read_lock(); if (adapt_layer == NULL) { pr_debug("link setup response but no client exist, send linkdown back\n"); cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); goto unlock; } caif_assert(cnfg != NULL); caif_assert(phyid != 0); phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); if (phyinfo == NULL) { pr_err("ERROR: Link Layer Device disappeared while connecting\n"); goto unlock; } caif_assert(phyinfo != NULL); caif_assert(phyinfo->id == phyid); caif_assert(phyinfo->phy_layer != NULL); caif_assert(phyinfo->phy_layer->id == phyid); adapt_layer->id = channel_id; switch (serv) { case CFCTRL_SRV_VEI: servicel = cfvei_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_DATAGRAM: servicel = cfdgml_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_RFM: netdev = phyinfo->dev_info.dev; servicel = cfrfml_create(channel_id, &phyinfo->dev_info, netdev->mtu); break; case CFCTRL_SRV_UTIL: servicel = cfutill_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_VIDEO: servicel = cfvidl_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_DBG: servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); break; default: pr_err("Protocol error. 
Link setup response - unknown channel type\n"); goto unlock; } if (!servicel) goto unlock; layer_set_dn(servicel, cnfg->mux); cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); layer_set_up(servicel, adapt_layer); layer_set_dn(adapt_layer, servicel); rcu_read_unlock(); servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); return; unlock: rcu_read_unlock(); } int cfcnfg_add_phy_layer(struct cfcnfg *cnfg, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, struct cflayer *link_support, bool fcs, int head_room) { struct cflayer *frml; struct cfcnfg_phyinfo *phyinfo = NULL; int i, res = 0; u8 phyid; mutex_lock(&cnfg->lock); /* CAIF protocol allow maximum 6 link-layers */ for (i = 0; i < 7; i++) { phyid = (dev->ifindex + i) & 0x7; if (phyid == 0) continue; if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL) goto got_phyid; } pr_warn("Too many CAIF Link Layers (max 6)\n"); res = -EEXIST; goto out; got_phyid: phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); if (!phyinfo) { res = -ENOMEM; goto out; } phy_layer->id = phyid; phyinfo->pref = pref; phyinfo->id = phyid; phyinfo->dev_info.id = phyid; phyinfo->dev_info.dev = dev; phyinfo->phy_layer = phy_layer; phyinfo->ifindex = dev->ifindex; phyinfo->head_room = head_room; phyinfo->use_fcs = fcs; frml = cffrml_create(phyid, fcs); if (!frml) { res = -ENOMEM; goto out_err; } phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); if (link_support != NULL) { link_support->id = phyid; layer_set_dn(frml, link_support); layer_set_up(link_support, frml); layer_set_dn(link_support, phy_layer); layer_set_up(phy_layer, link_support); } else { layer_set_dn(frml, phy_layer); layer_set_up(phy_layer, frml); } list_add_rcu(&phyinfo->node, &cnfg->phys); out: mutex_unlock(&cnfg->lock); return res; out_err: kfree(phyinfo); mutex_unlock(&cnfg->lock); return res; } EXPORT_SYMBOL(cfcnfg_add_phy_layer); int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, bool up) { struct cfcnfg_phyinfo *phyinfo; rcu_read_lock(); phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id); if (phyinfo == NULL) { rcu_read_unlock(); return -ENODEV; } if (phyinfo->up == up) { rcu_read_unlock(); return 0; } phyinfo->up = up; if (up) { cffrml_hold(phyinfo->frm_layer); cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer, phy_layer->id); } else { cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); cffrml_put(phyinfo->frm_layer); } rcu_read_unlock(); return 0; } EXPORT_SYMBOL(cfcnfg_set_phy_state); int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) { struct cflayer *frml, *frml_dn; u16 phyid; struct cfcnfg_phyinfo *phyinfo; might_sleep(); mutex_lock(&cnfg->lock); phyid = phy_layer->id; phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); if (phyinfo == NULL) { mutex_unlock(&cnfg->lock); return 0; } caif_assert(phyid == phyinfo->id); caif_assert(phy_layer == phyinfo->phy_layer); caif_assert(phy_layer->id == phyid); caif_assert(phyinfo->frm_layer->id == phyid); list_del_rcu(&phyinfo->node); synchronize_rcu(); /* Fail if reference count is not zero */ if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) { pr_info("Wait for device inuse\n"); list_add_rcu(&phyinfo->node, &cnfg->phys); mutex_unlock(&cnfg->lock); return -EAGAIN; } frml = phyinfo->frm_layer; frml_dn = frml->dn; cffrml_set_uplayer(frml, NULL); cffrml_set_dnlayer(frml, NULL); if (phy_layer != frml_dn) { layer_set_up(frml_dn, NULL); layer_set_dn(frml_dn, NULL); } layer_set_up(phy_layer, NULL); if (phyinfo->phy_layer != frml_dn) kfree(frml_dn); cffrml_free(frml); 
kfree(phyinfo); mutex_unlock(&cnfg->lock); return 0; } EXPORT_SYMBOL(cfcnfg_del_phy_layer);
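/*
 * Example (editor's illustration, not part of this file): how a CAIF
 * link-layer driver could plug a network device into the configuration
 * module above and then mark the link up. foo_caif_attach() and the zero
 * extra headroom are assumptions; a real driver passes its actual framing
 * headroom and, where used, a link-support (serial framing) layer.
 */
static int foo_caif_attach(struct net_device *dev, struct cflayer *phy)
{
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	int err;

	err = cfcnfg_add_phy_layer(cfg, dev, phy, CFPHYPREF_LOW_LAT,
				   NULL /* no link-support layer */,
				   false /* no start-of-frame checksum */,
				   0 /* assumed: no extra headroom */);
	if (err)
		return err;

	return cfcnfg_set_phy_state(cfg, phy, true);
}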
// SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "bkey_buf.h" #include "bset.h" #include "btree_cache.h" #include "btree_journal_iter.h" #include "journal_io.h" #include <linux/sort.h> /* * For managing keys we read from the journal: until journal replay works, normal * btree lookups need to be able to find and return keys from the journal where * they overwrite what's in the btree, so we have a special iterator and * operations for the regular btree iter code to use: */ static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx) { size_t gap_size = keys->size - keys->nr; if (idx >= keys->gap) idx += gap_size; return idx; } static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx) { return keys->data + idx_to_pos(keys, idx); } static size_t __bch2_journal_key_search(struct journal_keys *keys, enum btree_id id, unsigned level, struct bpos pos) { size_t l = 0, r = keys->nr, m; while (l < r) { m = l + ((r - l) >> 1); if (__journal_key_cmp(id, level, pos,
idx_to_key(keys, m)) > 0) l = m + 1; else r = m; } BUG_ON(l < keys->nr && __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0); BUG_ON(l && __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0); return l; } static size_t bch2_journal_key_search(struct journal_keys *keys, enum btree_id id, unsigned level, struct bpos pos) { return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos)); } /* Returns first non-overwritten key >= search key: */ struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos, struct bpos end_pos, size_t *idx) { struct journal_keys *keys = &c->journal_keys; unsigned iters = 0; struct journal_key *k; BUG_ON(*idx > keys->nr); search: if (!*idx) *idx = __bch2_journal_key_search(keys, btree_id, level, pos); while (*idx && __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { --(*idx); iters++; if (iters == 10) { *idx = 0; goto search; } } while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { if (__journal_key_cmp(btree_id, level, end_pos, k) < 0) return NULL; if (k->overwritten) { (*idx)++; continue; } if (__journal_key_cmp(btree_id, level, pos, k) <= 0) return k->k; (*idx)++; iters++; if (iters == 10) { *idx = 0; goto search; } } return NULL; } struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos) { size_t idx = 0; return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx); } static void journal_iter_verify(struct journal_iter *iter) { struct journal_keys *keys = iter->keys; size_t gap_size = keys->size - keys->nr; BUG_ON(iter->idx >= keys->gap && iter->idx < keys->gap + gap_size); if (iter->idx < keys->size) { struct journal_key *k = keys->data + iter->idx; int cmp = cmp_int(k->btree_id, iter->btree_id) ?: cmp_int(k->level, iter->level); BUG_ON(cmp < 0); } } static void journal_iters_fix(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; /* The key we just inserted is immediately before the gap: */ size_t gap_end = keys->gap + (keys->size - keys->nr); struct journal_key *new_key = &keys->data[keys->gap - 1]; struct journal_iter *iter; /* * If an iterator points one after the key we just inserted, decrement * the iterator so it points at the key we just inserted - if the * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will * handle that: */ list_for_each_entry(iter, &c->journal_iters, list) { journal_iter_verify(iter); if (iter->idx == gap_end && new_key->btree_id == iter->btree_id && new_key->level == iter->level) iter->idx = keys->gap - 1; journal_iter_verify(iter); } } static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap) { struct journal_keys *keys = &c->journal_keys; struct journal_iter *iter; size_t gap_size = keys->size - keys->nr; list_for_each_entry(iter, &c->journal_iters, list) { if (iter->idx > old_gap) iter->idx -= gap_size; if (iter->idx >= new_gap) iter->idx += gap_size; } } int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id, unsigned level, struct bkey_i *k) { struct journal_key n = { .btree_id = id, .level = level, .k = k, .allocated = true, /* * Ensure these keys are done last by journal replay, to unblock * journal reclaim: */ .journal_seq = U32_MAX, }; struct journal_keys *keys = &c->journal_keys; size_t idx = bch2_journal_key_search(keys, id, level, k->k.p); BUG_ON(test_bit(BCH_FS_rw, &c->flags)); if (idx < keys->size && journal_key_cmp(&n, &keys->data[idx]) == 0) { if 
(keys->data[idx].allocated) kfree(keys->data[idx].k); keys->data[idx] = n; return 0; } if (idx > keys->gap) idx -= keys->size - keys->nr; size_t old_gap = keys->gap; if (keys->nr == keys->size) { journal_iters_move_gap(c, old_gap, keys->size); old_gap = keys->size; struct journal_keys new_keys = { .nr = keys->nr, .size = max_t(size_t, keys->size, 8) * 2, }; new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL); if (!new_keys.data) { bch_err(c, "%s: error allocating new key array (size %zu)", __func__, new_keys.size); return -BCH_ERR_ENOMEM_journal_key_insert; } /* Since @keys was full, there was no gap: */ memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr); kvfree(keys->data); keys->data = new_keys.data; keys->nr = new_keys.nr; keys->size = new_keys.size; /* And now the gap is at the end: */ keys->gap = keys->nr; } journal_iters_move_gap(c, old_gap, idx); move_gap(keys, idx); keys->nr++; keys->data[keys->gap++] = n; journal_iters_fix(c); return 0; } /* * Can only be used from the recovery thread while we're still RO - can't be * used once we've got RW, as journal_keys is at that point used by multiple * threads: */ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id, unsigned level, struct bkey_i *k) { struct bkey_i *n; int ret; n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL); if (!n) return -BCH_ERR_ENOMEM_journal_key_insert; bkey_copy(n, k); ret = bch2_journal_key_insert_take(c, id, level, n); if (ret) kfree(n); return ret; } int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id, unsigned level, struct bpos pos) { struct bkey_i whiteout; bkey_init(&whiteout.k); whiteout.k.p = pos; return bch2_journal_key_insert(c, id, level, &whiteout); } bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bpos pos) { struct journal_keys *keys = &trans->c->journal_keys; size_t idx = bch2_journal_key_search(keys, btree, level, pos); if (!trans->journal_replay_not_finished) return false; return (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && bpos_eq(keys->data[idx].k->k.p, pos) && bkey_deleted(&keys->data[idx].k->k)); } void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, unsigned level, struct bpos pos) { struct journal_keys *keys = &c->journal_keys; size_t idx = bch2_journal_key_search(keys, btree, level, pos); if (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && bpos_eq(keys->data[idx].k->k.p, pos)) keys->data[idx].overwritten = true; } static void bch2_journal_iter_advance(struct journal_iter *iter) { if (iter->idx < iter->keys->size) { iter->idx++; if (iter->idx == iter->keys->gap) iter->idx += iter->keys->size - iter->keys->nr; } } static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter) { journal_iter_verify(iter); while (iter->idx < iter->keys->size) { struct journal_key *k = iter->keys->data + iter->idx; int cmp = cmp_int(k->btree_id, iter->btree_id) ?: cmp_int(k->level, iter->level); if (cmp > 0) break; BUG_ON(cmp); if (!k->overwritten) return bkey_i_to_s_c(k->k); bch2_journal_iter_advance(iter); } return bkey_s_c_null; } static void bch2_journal_iter_exit(struct journal_iter *iter) { list_del(&iter->list); } static void bch2_journal_iter_init(struct bch_fs *c, struct journal_iter *iter, enum btree_id id, unsigned level, struct bpos pos) { iter->btree_id = id; iter->level = level; iter->keys = &c->journal_keys; iter->idx = bch2_journal_key_search(&c->journal_keys, 
id, level, pos); journal_iter_verify(iter); } static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter) { return bch2_btree_node_iter_peek_unpack(&iter->node_iter, iter->b, &iter->unpacked); } static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter) { bch2_btree_node_iter_advance(&iter->node_iter, iter->b); } void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter) { if (bpos_eq(iter->pos, SPOS_MAX)) iter->at_end = true; else iter->pos = bpos_successor(iter->pos); } static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter) { struct btree_and_journal_iter iter = *_iter; struct bch_fs *c = iter.trans->c; unsigned level = iter.journal.level; struct bkey_buf tmp; unsigned nr = test_bit(BCH_FS_started, &c->flags) ? (level > 1 ? 0 : 2) : (level > 1 ? 1 : 16); iter.prefetch = false; bch2_bkey_buf_init(&tmp); while (nr--) { bch2_btree_and_journal_iter_advance(&iter); struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter); if (!k.k) break; bch2_bkey_buf_reassemble(&tmp, c, k); bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1); } bch2_bkey_buf_exit(&tmp, c); } struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter) { struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret; if (iter->prefetch && iter->journal.level) btree_and_journal_iter_prefetch(iter); again: if (iter->at_end) return bkey_s_c_null; while ((btree_k = bch2_journal_iter_peek_btree(iter)).k && bpos_lt(btree_k.k->p, iter->pos)) bch2_journal_iter_advance_btree(iter); if (iter->trans->journal_replay_not_finished) while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k && bpos_lt(journal_k.k->p, iter->pos)) bch2_journal_iter_advance(&iter->journal); ret = journal_k.k && (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p)) ? 
journal_k : btree_k; if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key)) ret = bkey_s_c_null; if (ret.k) { iter->pos = ret.k->p; if (bkey_deleted(ret.k)) { bch2_btree_and_journal_iter_advance(iter); goto again; } } else { iter->pos = SPOS_MAX; iter->at_end = true; } return ret; } void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter) { bch2_journal_iter_exit(&iter->journal); } void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, struct btree_and_journal_iter *iter, struct btree *b, struct btree_node_iter node_iter, struct bpos pos) { memset(iter, 0, sizeof(*iter)); iter->trans = trans; iter->b = b; iter->node_iter = node_iter; iter->pos = b->data->min_key; iter->at_end = false; INIT_LIST_HEAD(&iter->journal.list); if (trans->journal_replay_not_finished) { bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos); if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags)) list_add(&iter->journal.list, &trans->c->journal_iters); } } /* * this version is used by btree_gc before filesystem has gone RW and * multithreaded, so uses the journal_iters list: */ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, struct btree_and_journal_iter *iter, struct btree *b) { struct btree_node_iter node_iter; bch2_btree_node_iter_init_from_start(&node_iter, b); __bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key); } /* sort and dedup all keys in the journal: */ void bch2_journal_entries_free(struct bch_fs *c) { struct journal_replay **i; struct genradix_iter iter; genradix_for_each(&c->journal_entries, iter, i) kvfree(*i); genradix_free(&c->journal_entries); } /* * When keys compare equal, oldest compares first: */ static int journal_sort_key_cmp(const void *_l, const void *_r) { const struct journal_key *l = _l; const struct journal_key *r = _r; return journal_key_cmp(l, r) ?: cmp_int(l->journal_seq, r->journal_seq) ?: cmp_int(l->journal_offset, r->journal_offset); } void bch2_journal_keys_put(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; BUG_ON(atomic_read(&keys->ref) <= 0); if (!atomic_dec_and_test(&keys->ref)) return; move_gap(keys, keys->nr); darray_for_each(*keys, i) if (i->allocated) kfree(i->k); kvfree(keys->data); keys->data = NULL; keys->nr = keys->gap = keys->size = 0; bch2_journal_entries_free(c); } static void __journal_keys_sort(struct journal_keys *keys) { sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL); cond_resched(); struct journal_key *dst = keys->data; darray_for_each(*keys, src) { /* * We don't accumulate accounting keys here because we have to * compare each individual accounting key against the version in * the btree during replay: */ if (src->k->k.type != KEY_TYPE_accounting && src + 1 < &darray_top(*keys) && !journal_key_cmp(src, src + 1)) continue; *dst++ = *src; } keys->nr = dst - keys->data; } int bch2_journal_keys_sort(struct bch_fs *c) { struct genradix_iter iter; struct journal_replay *i, **_i; struct journal_keys *keys = &c->journal_keys; size_t nr_read = 0; genradix_for_each(&c->journal_entries, iter, _i) { i = *_i; if (journal_replay_ignore(i)) continue; cond_resched(); for_each_jset_key(k, entry, &i->j) { struct journal_key n = (struct journal_key) { .btree_id = entry->btree_id, .level = entry->level, .k = k, .journal_seq = le64_to_cpu(i->j.seq), .journal_offset = k->_data - i->j._data, }; if (darray_push(keys, n)) { __journal_keys_sort(keys); if (keys->nr * 8 > keys->size * 7) { bch_err(c, 
"Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu", keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq)); return -BCH_ERR_ENOMEM_journal_keys_sort; } BUG_ON(darray_push(keys, n)); } nr_read++; } } __journal_keys_sort(keys); keys->gap = keys->nr; bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr); return 0; } void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree, unsigned level_min, unsigned level_max, struct bpos start, struct bpos end) { struct journal_keys *keys = &c->journal_keys; size_t dst = 0; move_gap(keys, keys->nr); darray_for_each(*keys, i) if (!(i->btree_id == btree && i->level >= level_min && i->level <= level_max && bpos_ge(i->k->k.p, start) && bpos_le(i->k->k.p, end))) keys->data[dst++] = *i; keys->nr = keys->gap = dst; } void bch2_journal_keys_dump(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; struct printbuf buf = PRINTBUF; pr_info("%zu keys:", keys->nr); move_gap(keys, keys->nr); darray_for_each(*keys, i) { printbuf_reset(&buf); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k)); pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf); } printbuf_exit(&buf); }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * lib/textsearch.c	Generic text search interface
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *		Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * ==========================================================================
 */
/**
 * DOC: ts_intro
 * INTRODUCTION
 *
 * The textsearch infrastructure provides text searching facilities for
 * both linear and non-linear data. Individual search algorithms are
 * implemented in modules and chosen by the user.
 *
 * ARCHITECTURE
 *
 * .. code-block:: none
 *
 *      User
 *      +----------------+
 *      |        finish()|<--------------(6)-----------------+
 *      |get_next_block()|<--------------(5)---------------+ |
 *      |                |                     Algorithm   | |
 *      |                |                    +------------------------------+
 *      |                |                    |  init()   find()   destroy() |
 *      |                |                    +------------------------------+
 *      |                |       Core API           ^       ^          ^
 *      |                |      +---------------+  (2)     (4)        (8)
 *      | (1)|----->| prepare()     |---+       |          |
 *      | (3)|----->| find()/next() |-----------+          |
 *      | (7)|----->| destroy()     |----------------------+
 *      +----------------+ +---------------+
 *
 * (1) User configures a search by calling textsearch_prepare() specifying
 *     the search parameters such as the pattern and algorithm name.
 * (2) Core requests the algorithm to allocate and initialize a search
 *     configuration according to the specified parameters.
 * (3) User starts the search(es) by calling textsearch_find() or
 *     textsearch_next() to fetch subsequent occurrences. A state variable
 *     is provided to the algorithm to store persistent variables.
 * (4) Core eventually resets the search offset and forwards the find()
 *     request to the algorithm.
 * (5) Algorithm calls get_next_block() provided by the user continuously
 *     to fetch the data to be searched block by block.
 * (6) Algorithm invokes finish() after the last call to get_next_block
 *     to clean up any leftovers from get_next_block. (Optional)
 * (7) User destroys the configuration by calling textsearch_destroy().
 * (8) Core notifies the algorithm to destroy algorithm specific
 *     allocations. (Optional)
 *
 * USAGE
 *
 * Before a search can be performed, a configuration must be created
 * by calling textsearch_prepare() specifying the searching algorithm,
 * the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
 * to perform case-insensitive matching.
 * But it might slow down the performance of the algorithm, so use it
 * at your own risk. The returned configuration may then be used an
 * arbitrary number of times and even in parallel as long as a separate
 * struct ts_state variable is provided to every instance.
 *
 * The actual search is performed by either calling
 * textsearch_find_continuous() for linear data or by providing
 * your own get_next_block() implementation and
 * calling textsearch_find(). Both functions return
 * the position of the first occurrence of the pattern or UINT_MAX if
 * no match was found. Subsequent occurrences can be found by calling
 * textsearch_next() regardless of the linearity of the data.
 *
 * Once you're done using a configuration, it must be given back via
 * textsearch_destroy().
 *
 * EXAMPLE::
 *
 *   int pos, err;
 *   struct ts_config *conf;
 *   struct ts_state state;
 *   const char *pattern = "chicken";
 *   const char *example = "We dance the funky chicken";
 *
 *   conf = textsearch_prepare("kmp", pattern, strlen(pattern),
 *                             GFP_KERNEL, TS_AUTOLOAD);
 *   if (IS_ERR(conf)) {
 *       err = PTR_ERR(conf);
 *       goto errout;
 *   }
 *
 *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
 *   if (pos != UINT_MAX)
 *       panic("Oh my god, dancing chickens at %d\n", pos);
 *
 *   textsearch_destroy(conf);
 */
/* ========================================================================== */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/err.h> #include <linux/textsearch.h> #include <linux/slab.h> static LIST_HEAD(ts_ops); static DEFINE_SPINLOCK(ts_mod_lock); static inline struct ts_ops *lookup_ts_algo(const char *name) { struct ts_ops *o; rcu_read_lock(); list_for_each_entry_rcu(o, &ts_ops, list) { if (!strcmp(name, o->name)) { if (!try_module_get(o->owner)) o = NULL; rcu_read_unlock(); return o; } } rcu_read_unlock(); return NULL; } /** * textsearch_register - register a textsearch module * @ops: operations lookup table * * This function must be called by textsearch modules to announce * their presence. The specified @ops must have %name set to a * unique identifier and the callbacks find(), init(), get_pattern(), * and get_pattern_len() must be implemented. * * Returns 0 or -EEXIST if another module has already registered * with the same name. */ int textsearch_register(struct ts_ops *ops) { int err = -EEXIST; struct ts_ops *o; if (ops->name == NULL || ops->find == NULL || ops->init == NULL || ops->get_pattern == NULL || ops->get_pattern_len == NULL) return -EINVAL; spin_lock(&ts_mod_lock); list_for_each_entry(o, &ts_ops, list) { if (!strcmp(ops->name, o->name)) goto errout; } list_add_tail_rcu(&ops->list, &ts_ops); err = 0; errout: spin_unlock(&ts_mod_lock); return err; } EXPORT_SYMBOL(textsearch_register); /** * textsearch_unregister - unregister a textsearch module * @ops: operations lookup table * * This function must be called by textsearch modules to announce * their disappearance, for example when the module gets unloaded. * The &ops parameter must be the same as the one during the * registration. * * Returns 0 on success or -ENOENT if no matching textsearch * registration was found.
 */
int textsearch_unregister(struct ts_ops *ops)
{
        int err = 0;
        struct ts_ops *o;

        spin_lock(&ts_mod_lock);
        list_for_each_entry(o, &ts_ops, list) {
                if (o == ops) {
                        list_del_rcu(&o->list);
                        goto out;
                }
        }

        err = -ENOENT;
out:
        spin_unlock(&ts_mod_lock);
        return err;
}
EXPORT_SYMBOL(textsearch_unregister);

struct ts_linear_state {
        unsigned int    len;
        const void      *data;
};

static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
                                    struct ts_config *conf,
                                    struct ts_state *state)
{
        struct ts_linear_state *st = (struct ts_linear_state *) state->cb;

        if (likely(consumed < st->len)) {
                *dst = st->data + consumed;
                return st->len - consumed;
        }

        return 0;
}

/**
 * textsearch_find_continuous - search a pattern in continuous/linear data
 * @conf: search configuration
 * @state: search state
 * @data: data to search in
 * @len: length of data
 *
 * A simplified version of textsearch_find() for continuous/linear data.
 * Call textsearch_next() to retrieve subsequent matches.
 *
 * Returns the position of first occurrence of the pattern or
 * %UINT_MAX if no occurrence was found.
 */
unsigned int textsearch_find_continuous(struct ts_config *conf,
                                        struct ts_state *state,
                                        const void *data, unsigned int len)
{
        struct ts_linear_state *st = (struct ts_linear_state *) state->cb;

        conf->get_next_block = get_linear_data;
        st->data = data;
        st->len = len;

        return textsearch_find(conf, state);
}
EXPORT_SYMBOL(textsearch_find_continuous);

/**
 * textsearch_prepare - Prepare a search
 * @algo: name of search algorithm
 * @pattern: pattern data
 * @len: length of pattern
 * @gfp_mask: allocation mask
 * @flags: search flags
 *
 * Looks up the search algorithm module and creates a new textsearch
 * configuration for the specified pattern.
 *
 * Note: The format of the pattern may not be compatible between
 *       the various search algorithms.
 *
 * Returns a new textsearch configuration according to the specified
 * parameters or an ERR_PTR(). If a zero length pattern is passed, this
 * function returns ERR_PTR(-EINVAL).
 */
struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
                                     unsigned int len, gfp_t gfp_mask, int flags)
{
        int err = -ENOENT;
        struct ts_config *conf;
        struct ts_ops *ops;

        if (len == 0)
                return ERR_PTR(-EINVAL);

        ops = lookup_ts_algo(algo);
#ifdef CONFIG_MODULES
        /*
         * Why not always autoload you may ask. Some users are
         * in a situation where requesting a module may deadlock,
         * especially when the module is located on an NFS mount.
         */
        if (ops == NULL && flags & TS_AUTOLOAD) {
                request_module("ts_%s", algo);
                ops = lookup_ts_algo(algo);
        }
#endif

        if (ops == NULL)
                goto errout;

        conf = ops->init(pattern, len, gfp_mask, flags);
        if (IS_ERR(conf)) {
                err = PTR_ERR(conf);
                goto errout;
        }

        conf->ops = ops;
        return conf;

errout:
        if (ops)
                module_put(ops->owner);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(textsearch_prepare);

/**
 * textsearch_destroy - destroy a search configuration
 * @conf: search configuration
 *
 * Releases all references of the configuration and frees
 * up the memory.
 */
void textsearch_destroy(struct ts_config *conf)
{
        if (conf->ops) {
                if (conf->ops->destroy)
                        conf->ops->destroy(conf);
                module_put(conf->ops->owner);
        }

        kfree(conf);
}
EXPORT_SYMBOL(textsearch_destroy);
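
A minimal sketch (not part of lib/textsearch.c) of the non-linear usage described in the DOC block above: a caller-supplied get_next_block() that walks data split across two fragments, keeping its bookkeeping in the state->cb scratch area just as get_linear_data() does. The two-fragment layout and the frag_search() wrapper are hypothetical; only the get_next_block() signature and the textsearch_find() call come from the interface above, and the scratch struct is assumed small enough to fit in state->cb.

/* Hypothetical caller state, stored in state->cb (assumed to fit). */
struct two_frag_state {
        const u8        *frag0, *frag1;
        unsigned int    len0, len1;
};

static unsigned int two_frag_next_block(unsigned int consumed, const u8 **dst,
                                        struct ts_config *conf,
                                        struct ts_state *state)
{
        struct two_frag_state *st = (struct two_frag_state *) state->cb;

        /* Hand out the first fragment until it is consumed... */
        if (consumed < st->len0) {
                *dst = st->frag0 + consumed;
                return st->len0 - consumed;
        }
        /* ...then continue seamlessly in the second fragment. */
        consumed -= st->len0;
        if (consumed < st->len1) {
                *dst = st->frag1 + consumed;
                return st->len1 - consumed;
        }
        return 0;       /* no more data */
}

static unsigned int frag_search(struct ts_config *conf, struct ts_state *state,
                                const u8 *f0, unsigned int l0,
                                const u8 *f1, unsigned int l1)
{
        struct two_frag_state *st = (struct two_frag_state *) state->cb;

        st->frag0 = f0;
        st->len0 = l0;
        st->frag1 = f1;
        st->len1 = l1;
        conf->get_next_block = two_frag_next_block;

        /* Returns the match offset across both fragments, or UINT_MAX. */
        return textsearch_find(conf, state);
}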
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Information interface for ALSA driver
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 */

#include <linux/slab.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/info.h>
#include <linux/utsname.h>
#include <linux/mutex.h>

/*
 *  OSS compatible part
 */

static DEFINE_MUTEX(strings);
static char *snd_sndstat_strings[SNDRV_CARDS][SNDRV_OSS_INFO_DEV_COUNT];

int snd_oss_info_register(int dev, int num, char *string)
{
        char *x;

        if (snd_BUG_ON(dev < 0 || dev >= SNDRV_OSS_INFO_DEV_COUNT))
                return -ENXIO;
        if (snd_BUG_ON(num < 0 || num >= SNDRV_CARDS))
                return -ENXIO;
        guard(mutex)(&strings);
        if (string == NULL) {
                x = snd_sndstat_strings[num][dev];
                kfree(x);
                x = NULL;
        } else {
                x = kstrdup(string, GFP_KERNEL);
                if (x == NULL)
                        return -ENOMEM;
        }
        snd_sndstat_strings[num][dev] = x;
        return 0;
}
EXPORT_SYMBOL(snd_oss_info_register);

static int snd_sndstat_show_strings(struct snd_info_buffer *buf, char *id, int dev)
{
        int idx, ok = -1;
        char *str;

        snd_iprintf(buf, "\n%s:", id);
        guard(mutex)(&strings);
        for (idx = 0; idx < SNDRV_CARDS; idx++) {
                str = snd_sndstat_strings[idx][dev];
                if (str) {
                        if (ok < 0) {
                                snd_iprintf(buf, "\n");
                                ok++;
                        }
                        snd_iprintf(buf, "%i: %s\n", idx, str);
                }
        }
        if (ok < 0)
                snd_iprintf(buf, " NOT ENABLED IN CONFIG\n");
        return ok;
}

static void snd_sndstat_proc_read(struct snd_info_entry *entry,
                                  struct snd_info_buffer *buffer)
{
        snd_iprintf(buffer, "Sound Driver:3.8.1a-980706 (ALSA emulation code)\n");
        snd_iprintf(buffer, "Kernel: %s %s %s %s %s\n",
                    init_utsname()->sysname,
                    init_utsname()->nodename,
                    init_utsname()->release,
                    init_utsname()->version,
                    init_utsname()->machine);
        snd_iprintf(buffer, "Config options: 0\n");
        snd_iprintf(buffer, "\nInstalled drivers: \n");
        snd_iprintf(buffer, "Type 10: ALSA emulation\n");
        snd_iprintf(buffer, "\nCard config: \n");
        snd_card_info_read_oss(buffer);
        snd_sndstat_show_strings(buffer, "Audio devices", SNDRV_OSS_INFO_DEV_AUDIO);
        snd_sndstat_show_strings(buffer, "Synth devices", SNDRV_OSS_INFO_DEV_SYNTH);
        snd_sndstat_show_strings(buffer, "Midi devices", SNDRV_OSS_INFO_DEV_MIDI);
        snd_sndstat_show_strings(buffer, "Timers", SNDRV_OSS_INFO_DEV_TIMERS);
        snd_sndstat_show_strings(buffer, "Mixers", SNDRV_OSS_INFO_DEV_MIXERS);
}

int __init snd_info_minor_register(void)
{
        struct snd_info_entry *entry;

        memset(snd_sndstat_strings, 0, sizeof(snd_sndstat_strings));
        entry = snd_info_create_module_entry(THIS_MODULE, "sndstat", snd_oss_root);
        if (!entry)
                return -ENOMEM;
        entry->c.text.read = snd_sndstat_proc_read;
        return snd_info_register(entry); /* freed in error path */
}
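
A minimal sketch (hypothetical driver code, not part of info_oss.c) of how a card driver would publish, and later clear, its line in the OSS-compatible /proc/asound/oss/sndstat output through snd_oss_info_register() above. The example_* helpers and the device label are made up; card->number is assumed to come from a registered struct snd_card, and a NULL string frees the stored slot, as the function's NULL branch shows.

#include <sound/core.h>
#include <sound/info.h>

/* Publish one line under "Audio devices:" for this card. */
static int example_publish_sndstat(struct snd_card *card)
{
        return snd_oss_info_register(SNDRV_OSS_INFO_DEV_AUDIO,
                                     card->number, "My PCM device");
}

/* A NULL string frees the slot again, e.g. on driver removal. */
static void example_clear_sndstat(struct snd_card *card)
{
        snd_oss_info_register(SNDRV_OSS_INFO_DEV_AUDIO, card->number, NULL);
}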
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_skbprio.c  SKB Priority Queue.
 *
 * Authors:	Nishanth Devarajan, <ndev2021@gmail.com>
 *		Cody Doucette, <doucette@bu.edu>
 *		original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/inet_ecn.h>

/*		SKB Priority Queue
 *	=================================
 *
 * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
 * packets according to their skb->priority field. Under congestion,
 * Skbprio drops already-enqueued lower priority packets to make space
 * available for higher priority packets; it was conceived as a solution
 * for denial-of-service defenses that need to route packets with different
 * priorities as a means to overcome DoS attacks.
 */

struct skbprio_sched_data {
        /* Queue state. */
        struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
        struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
        u16 highest_prio;
        u16 lowest_prio;
};

static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
{
        int prio;

        for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
                if (!skb_queue_empty(&q->qdiscs[prio]))
                        return prio;
        }

        /* SKB queue is empty, return 0 (default highest priority setting). */
        return 0;
}

static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
{
        int prio;

        for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
                if (!skb_queue_empty(&q->qdiscs[prio]))
                        return prio;
        }

        /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
         * (default lowest priority setting).
         */
        return SKBPRIO_MAX_PRIORITY - 1;
}

static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
{
        const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
        struct skbprio_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *qdisc;
        struct sk_buff_head *lp_qdisc;
        struct sk_buff *to_drop;
        u16 prio, lp;

        /* Obtain the priority of @skb.
         */
        prio = min(skb->priority, max_priority);

        qdisc = &q->qdiscs[prio];

        /* sch->limit can change under us from skbprio_change() */
        if (sch->q.qlen < READ_ONCE(sch->limit)) {
                __skb_queue_tail(qdisc, skb);
                qdisc_qstats_backlog_inc(sch, skb);
                q->qstats[prio].backlog += qdisc_pkt_len(skb);

                /* Check to update highest and lowest priorities. */
                if (prio > q->highest_prio)
                        q->highest_prio = prio;

                if (prio < q->lowest_prio)
                        q->lowest_prio = prio;

                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }

        /* If this packet has the lowest priority, drop it. */
        lp = q->lowest_prio;
        if (prio <= lp) {
                q->qstats[prio].drops++;
                q->qstats[prio].overlimits++;
                return qdisc_drop(skb, sch, to_free);
        }

        __skb_queue_tail(qdisc, skb);
        qdisc_qstats_backlog_inc(sch, skb);
        q->qstats[prio].backlog += qdisc_pkt_len(skb);

        /* Drop the packet at the tail of the lowest priority qdisc. */
        lp_qdisc = &q->qdiscs[lp];

        to_drop = __skb_dequeue_tail(lp_qdisc);
        BUG_ON(!to_drop);
        qdisc_qstats_backlog_dec(sch, to_drop);
        qdisc_drop(to_drop, sch, to_free);

        q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
        q->qstats[lp].drops++;
        q->qstats[lp].overlimits++;

        /* Check to update highest and lowest priorities. */
        if (skb_queue_empty(lp_qdisc)) {
                if (q->lowest_prio == q->highest_prio) {
                        /* The incoming packet is the only packet in queue. */
                        BUG_ON(sch->q.qlen != 1);
                        q->lowest_prio = prio;
                        q->highest_prio = prio;
                } else {
                        q->lowest_prio = calc_new_low_prio(q);
                }
        }

        if (prio > q->highest_prio)
                q->highest_prio = prio;

        return NET_XMIT_CN;
}

static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
        struct sk_buff *skb = __skb_dequeue(hpq);

        if (unlikely(!skb))
                return NULL;

        sch->q.qlen--;
        qdisc_qstats_backlog_dec(sch, skb);
        qdisc_bstats_update(sch, skb);

        q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);

        /* Update highest priority field. */
        if (skb_queue_empty(hpq)) {
                if (q->lowest_prio == q->highest_prio) {
                        BUG_ON(sch->q.qlen);
                        q->highest_prio = 0;
                        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
                } else {
                        q->highest_prio = calc_new_high_prio(q);
                }
        }
        return skb;
}

static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
{
        struct tc_skbprio_qopt *ctl = nla_data(opt);

        if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
                return -EINVAL;

        WRITE_ONCE(sch->limit, ctl->limit);
        return 0;
}

static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        /* Initialise all queues, one for each possible priority.
         */
        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_head_init(&q->qdiscs[prio]);

        memset(&q->qstats, 0, sizeof(q->qstats));
        q->highest_prio = 0;
        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
        sch->limit = 64;
        if (!opt)
                return 0;

        return skbprio_change(sch, opt, extack);
}

static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tc_skbprio_qopt opt;

        opt.limit = READ_ONCE(sch->limit);

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                return -1;

        return skb->len;
}

static void skbprio_reset(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_purge(&q->qdiscs[prio]);

        memset(&q->qstats, 0, sizeof(q->qstats));
        q->highest_prio = 0;
        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
}

static void skbprio_destroy(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_purge(&q->qdiscs[prio]);
}

static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
                              struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                    struct gnet_dump *d)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);

        if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
                                  q->qstats[cl - 1].qlen) < 0)
                return -1;

        return 0;
}

static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
                if (!tc_qdisc_stats_dump(sch, i + 1, arg))
                        break;
        }
}

static const struct Qdisc_class_ops skbprio_class_ops = {
        .leaf           =       skbprio_leaf,
        .find           =       skbprio_find,
        .dump           =       skbprio_dump_class,
        .dump_stats     =       skbprio_dump_class_stats,
        .walk           =       skbprio_walk,
};

static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
        .cl_ops         =       &skbprio_class_ops,
        .id             =       "skbprio",
        .priv_size      =       sizeof(struct skbprio_sched_data),
        .enqueue        =       skbprio_enqueue,
        .dequeue        =       skbprio_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       skbprio_init,
        .reset          =       skbprio_reset,
        .change         =       skbprio_change,
        .dump           =       skbprio_dump,
        .destroy        =       skbprio_destroy,
        .owner          =       THIS_MODULE,
};

MODULE_ALIAS_NET_SCH("skbprio");

static int __init skbprio_module_init(void)
{
        return register_qdisc(&skbprio_qdisc_ops);
}

static void __exit skbprio_module_exit(void)
{
        unregister_qdisc(&skbprio_qdisc_ops);
}

module_init(skbprio_module_init)
module_exit(skbprio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
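
Since skbprio classifies purely on skb->priority, one way to exercise the qdisc from userspace is a minimal sketch like the following, assuming a Linux host: SO_PRIORITY sets sk->sk_priority, which becomes skb->priority on locally generated packets (values above 6 normally require CAP_NET_ADMIN, and skbprio_enqueue() above clamps anything at or beyond SKBPRIO_MAX_PRIORITY).

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int prio = 42;  /* becomes skb->priority; higher wins under skbprio */

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)) < 0) {
                perror("setsockopt(SO_PRIORITY)"); /* >6 needs CAP_NET_ADMIN */
                close(fd);
                return 1;
        }
        /* ... sendto() as usual; these packets now outrank priority-0
         * traffic queued behind the same skbprio root qdisc. */
        close(fd);
        return 0;
}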
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* * Copyright (c) 2004-2007 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved. */ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/module.h> #include <linux/err.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/workqueue.h> #include <linux/kdev_t.h> #include <linux/etherdevice.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sysfs.h> #include "cm_msgs.h" #include "core_priv.h" #include "cm_trace.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("InfiniBand CM"); MODULE_LICENSE("Dual BSD/GPL"); #define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */ #define CM_DIRECT_RETRY_CTX ((void *) 1UL) static const char * const ibcm_rej_reason_strs[] = { [IB_CM_REJ_NO_QP] = "no QP", [IB_CM_REJ_NO_EEC] = "no EEC", [IB_CM_REJ_NO_RESOURCES] = "no resources", [IB_CM_REJ_TIMEOUT] = "timeout", [IB_CM_REJ_UNSUPPORTED] = "unsupported", [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID", [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance", [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID", [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type", [IB_CM_REJ_STALE_CONN] = "stale conn", [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist", [IB_CM_REJ_INVALID_GID] = "invalid GID", [IB_CM_REJ_INVALID_LID] = "invalid LID", [IB_CM_REJ_INVALID_SL] = "invalid SL", [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class", [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit", [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate", [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID", [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID", [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL", [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class", [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit", [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate", [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect", [IB_CM_REJ_PORT_REDIRECT] = "port redirect", [IB_CM_REJ_INVALID_MTU] = "invalid MTU", [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources", [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined", [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry", [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID", [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version", [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label", [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label", [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] = "vendor option is not supported", }; const char *__attribute_const__ ibcm_reject_msg(int reason) { size_t index = reason; if (index < ARRAY_SIZE(ibcm_rej_reason_strs)
&& ibcm_rej_reason_strs[index]) return ibcm_rej_reason_strs[index]; else return "unrecognized reason"; } EXPORT_SYMBOL(ibcm_reject_msg); struct cm_id_private; struct cm_work; static int cm_add_one(struct ib_device *device); static void cm_remove_one(struct ib_device *device, void *client_data); static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work); static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param); static void cm_issue_dreq(struct cm_id_private *cm_id_priv); static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len); static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len); static struct ib_client cm_client = { .name = "cm", .add = cm_add_one, .remove = cm_remove_one }; static struct ib_cm { spinlock_t lock; struct list_head device_list; rwlock_t device_lock; struct rb_root listen_service_table; u64 listen_service_id; /* struct rb_root peer_service_table; todo: fix peer to peer */ struct rb_root remote_qp_table; struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct xarray local_id_table; u32 local_id_next; __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; } cm; /* Counter indexes ordered by attribute ID */ enum { CM_REQ_COUNTER, CM_MRA_COUNTER, CM_REJ_COUNTER, CM_REP_COUNTER, CM_RTU_COUNTER, CM_DREQ_COUNTER, CM_DREP_COUNTER, CM_SIDR_REQ_COUNTER, CM_SIDR_REP_COUNTER, CM_LAP_COUNTER, CM_APR_COUNTER, CM_ATTR_COUNT, CM_ATTR_ID_OFFSET = 0x0010, }; enum { CM_XMIT, CM_XMIT_RETRIES, CM_RECV, CM_RECV_DUPLICATES, CM_COUNTER_GROUPS }; struct cm_counter_attribute { struct ib_port_attribute attr; unsigned short group; unsigned short index; }; struct cm_port { struct cm_device *cm_dev; struct ib_mad_agent *mad_agent; u32 port_num; atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT]; }; struct cm_device { struct kref kref; struct list_head list; spinlock_t mad_agent_lock; struct ib_device *ib_device; u8 ack_delay; int going_down; struct cm_port *port[]; }; struct cm_av { struct cm_port *port; struct rdma_ah_attr ah_attr; u16 dlid_datapath; u16 pkey_index; u8 timeout; }; struct cm_work { struct delayed_work work; struct list_head list; struct cm_port *port; struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ __be32 local_id; /* Established / timewait */ __be32 remote_id; struct ib_cm_event cm_event; struct sa_path_rec path[]; }; struct cm_timewait_info { struct cm_work work; struct list_head list; struct rb_node remote_qp_node; struct rb_node remote_id_node; __be64 remote_ca_guid; __be32 remote_qpn; u8 inserted_remote_qp; u8 inserted_remote_id; }; struct cm_id_private { struct ib_cm_id id; struct rb_node service_node; struct rb_node sidr_id_node; u32 sidr_slid; spinlock_t lock; /* Do not acquire inside cm.lock */ struct completion comp; refcount_t refcount; /* Number of clients sharing this ib_cm_id. Only valid for listeners. * Protected by the cm.lock spinlock. 
*/ int listen_sharecount; struct rcu_head rcu; struct ib_mad_send_buf *msg; struct cm_timewait_info *timewait_info; /* todo: use alternate port on send failure */ struct cm_av av; struct cm_av alt_av; void *private_data; __be64 tid; __be32 local_qpn; __be32 remote_qpn; enum ib_qp_type qp_type; __be32 sq_psn; __be32 rq_psn; int timeout_ms; enum ib_mtu path_mtu; __be16 pkey; u8 private_data_len; u8 max_cm_retries; u8 responder_resources; u8 initiator_depth; u8 retry_count; u8 rnr_retry_count; u8 service_timeout; u8 target_ack_delay; struct list_head work_list; atomic_t work_count; struct rdma_ucm_ece ece; }; static void cm_dev_release(struct kref *kref) { struct cm_device *cm_dev = container_of(kref, struct cm_device, kref); u32 i; rdma_for_each_port(cm_dev->ib_device, i) kfree(cm_dev->port[i - 1]); kfree(cm_dev); } static void cm_device_put(struct cm_device *cm_dev) { kref_put(&cm_dev->kref, cm_dev_release); } static void cm_work_handler(struct work_struct *work); static inline void cm_deref_id(struct cm_id_private *cm_id_priv) { if (refcount_dec_and_test(&cm_id_priv->refcount)) complete(&cm_id_priv->comp); } static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv) { struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; lockdep_assert_held(&cm_id_priv->lock); if (!cm_id_priv->av.port) return ERR_PTR(-EINVAL); spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); mad_agent = cm_id_priv->av.port->mad_agent; if (!mad_agent) { m = ERR_PTR(-EINVAL); goto out; } ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0); if (IS_ERR(ah)) { m = ERR_CAST(ah); goto out; } m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { rdma_destroy_ah(ah, 0); goto out; } m->ah = ah; out: spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return m; } static void cm_free_msg(struct ib_mad_send_buf *msg) { if (msg->ah) rdma_destroy_ah(msg->ah, 0); ib_free_send_mad(msg); } static struct ib_mad_send_buf * cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state) { struct ib_mad_send_buf *msg; lockdep_assert_held(&cm_id_priv->lock); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return msg; cm_id_priv->msg = msg; refcount_inc(&cm_id_priv->refcount); msg->context[0] = cm_id_priv; msg->context[1] = (void *) (unsigned long) state; msg->retries = cm_id_priv->max_cm_retries; msg->timeout_ms = cm_id_priv->timeout_ms; return msg; } static void cm_free_priv_msg(struct ib_mad_send_buf *msg) { struct cm_id_private *cm_id_priv = msg->context[0]; lockdep_assert_held(&cm_id_priv->lock); if (!WARN_ON(cm_id_priv->msg != msg)) cm_id_priv->msg = NULL; if (msg->ah) rdma_destroy_ah(msg->ah, 0); cm_deref_id(cm_id_priv); ib_free_send_mad(msg); } static struct ib_mad_send_buf * cm_alloc_response_msg_no_ah(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, bool direct_retry) { struct ib_mad_send_buf *m; m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (!IS_ERR(m)) m->context[0] = direct_retry ? 
CM_DIRECT_RETRY_CTX : NULL; return m; } static int cm_create_response_msg_ah(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, struct ib_mad_send_buf *msg) { struct ib_ah *ah; ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc, mad_recv_wc->recv_buf.grh, port->port_num); if (IS_ERR(ah)) return PTR_ERR(ah); msg->ah = ah; return 0; } static int cm_alloc_response_msg(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, bool direct_retry, struct ib_mad_send_buf **msg) { struct ib_mad_send_buf *m; int ret; m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry); if (IS_ERR(m)) return PTR_ERR(m); ret = cm_create_response_msg_ah(port, mad_recv_wc, m); if (ret) { ib_free_send_mad(m); return ret; } *msg = m; return 0; } static void *cm_copy_private_data(const void *private_data, u8 private_data_len) { void *data; if (!private_data || !private_data_len) return NULL; data = kmemdup(private_data, private_data_len, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); return data; } static void cm_set_private_data(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { if (cm_id_priv->private_data && cm_id_priv->private_data_len) kfree(cm_id_priv->private_data); cm_id_priv->private_data = private_data; cm_id_priv->private_data_len = private_data_len; } static void cm_set_av_port(struct cm_av *av, struct cm_port *port) { struct cm_port *old_port = av->port; if (old_port == port) return; av->port = port; if (old_port) cm_device_put(old_port->cm_dev); if (port) kref_get(&port->cm_dev->kref); } static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc, struct rdma_ah_attr *ah_attr, struct cm_av *av) { cm_set_av_port(av, port); av->pkey_index = wc->pkey_index; rdma_move_ah_attr(&av->ah_attr, ah_attr); } static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, struct ib_grh *grh, struct cm_av *av) { cm_set_av_port(av, port); av->pkey_index = wc->pkey_index; return ib_init_ah_attr_from_wc(port->cm_dev->ib_device, port->port_num, wc, grh, &av->ah_attr); } static struct cm_port * get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr) { struct cm_device *cm_dev; struct cm_port *port = NULL; unsigned long flags; if (attr) { read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { if (cm_dev->ib_device == attr->device) { port = cm_dev->port[attr->port_num - 1]; break; } } read_unlock_irqrestore(&cm.device_lock, flags); } else { /* SGID attribute can be NULL in following * conditions. 
* (a) Alternative path * (b) IB link layer without GRH * (c) LAP send messages */ read_lock_irqsave(&cm.device_lock, flags); list_for_each_entry(cm_dev, &cm.device_list, list) { attr = rdma_find_gid(cm_dev->ib_device, &path->sgid, sa_conv_pathrec_to_gid_type(path), NULL); if (!IS_ERR(attr)) { port = cm_dev->port[attr->port_num - 1]; break; } } read_unlock_irqrestore(&cm.device_lock, flags); if (port) rdma_put_gid_attr(attr); } return port; } static int cm_init_av_by_path(struct sa_path_rec *path, const struct ib_gid_attr *sgid_attr, struct cm_av *av) { struct rdma_ah_attr new_ah_attr; struct cm_device *cm_dev; struct cm_port *port; int ret; port = get_cm_port_from_path(path, sgid_attr); if (!port) return -EINVAL; cm_dev = port->cm_dev; ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num, be16_to_cpu(path->pkey), &av->pkey_index); if (ret) return ret; cm_set_av_port(av, port); /* * av->ah_attr might be initialized based on wc or during * request processing time, which might have a reference to sgid_attr. * So initialize a new ah_attr on the stack. * If initialization fails, the old ah_attr is used for sending any * responses. If initialization is successful, then the new ah_attr * is used by overwriting the old one, so that the right ah_attr * can be used to return an error response. */ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path, &new_ah_attr, sgid_attr); if (ret) return ret; av->timeout = path->packet_life_time + 1; rdma_move_ah_attr(&av->ah_attr, &new_ah_attr); return 0; } /* Move av created by cm_init_av_by_path(), so av.dgid is not moved */ static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src) { cm_set_av_port(dest, src->port); cm_set_av_port(src, NULL); dest->pkey_index = src->pkey_index; rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr); dest->timeout = src->timeout; } static void cm_destroy_av(struct cm_av *av) { rdma_destroy_ah_attr(&av->ah_attr); cm_set_av_port(av, NULL); } static u32 cm_local_id(__be32 local_id) { return (__force u32) (local_id ^ cm.random_id_operand); } static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; rcu_read_lock(); cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id)); if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id || !refcount_inc_not_zero(&cm_id_priv->refcount)) cm_id_priv = NULL; rcu_read_unlock(); return cm_id_priv; } /* * Trivial helpers to strip endian annotation and compare; the * endianness doesn't actually matter since we just need a stable * order for the RB tree. */ static int be32_lt(__be32 a, __be32 b) { return (__force u32) a < (__force u32) b; } static int be32_gt(__be32 a, __be32 b) { return (__force u32) a > (__force u32) b; } static int be64_lt(__be64 a, __be64 b) { return (__force u64) a < (__force u64) b; } static int be64_gt(__be64 a, __be64 b) { return (__force u64) a > (__force u64) b; } /* * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv * if the new ID was inserted, NULL if it could not be inserted due to a * collision, or the existing cm_id_priv ready for shared usage.
*/ static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv, ib_cm_handler shared_handler) { struct rb_node **link = &cm.listen_service_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be64 service_id = cm_id_priv->id.service_id; unsigned long flags; spin_lock_irqsave(&cm.lock, flags); while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, service_node); if (cm_id_priv->id.device < cur_cm_id_priv->id.device) link = &(*link)->rb_left; else if (cm_id_priv->id.device > cur_cm_id_priv->id.device) link = &(*link)->rb_right; else if (be64_lt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_left; else if (be64_gt(service_id, cur_cm_id_priv->id.service_id)) link = &(*link)->rb_right; else { /* * Sharing an ib_cm_id with different handlers is not * supported */ if (cur_cm_id_priv->id.cm_handler != shared_handler || cur_cm_id_priv->id.context || WARN_ON(!cur_cm_id_priv->id.cm_handler)) { spin_unlock_irqrestore(&cm.lock, flags); return NULL; } refcount_inc(&cur_cm_id_priv->refcount); cur_cm_id_priv->listen_sharecount++; spin_unlock_irqrestore(&cm.lock, flags); return cur_cm_id_priv; } } cm_id_priv->listen_sharecount++; rb_link_node(&cm_id_priv->service_node, parent, link); rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table); spin_unlock_irqrestore(&cm.lock, flags); return cm_id_priv; } static struct cm_id_private *cm_find_listen(struct ib_device *device, __be64 service_id) { struct rb_node *node = cm.listen_service_table.rb_node; struct cm_id_private *cm_id_priv; while (node) { cm_id_priv = rb_entry(node, struct cm_id_private, service_node); if (device < cm_id_priv->id.device) node = node->rb_left; else if (device > cm_id_priv->id.device) node = node->rb_right; else if (be64_lt(service_id, cm_id_priv->id.service_id)) node = node->rb_left; else if (be64_gt(service_id, cm_id_priv->id.service_id)) node = node->rb_right; else { refcount_inc(&cm_id_priv->refcount); return cm_id_priv; } } return NULL; } static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_id_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_id = timewait_info->work.remote_id; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_id = 1; rb_link_node(&timewait_info->remote_id_node, parent, link); rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table); return NULL; } static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid, __be32 remote_id) { struct rb_node *node = cm.remote_id_table.rb_node; struct cm_timewait_info *timewait_info; struct cm_id_private *res = NULL; spin_lock_irq(&cm.lock); while (node) { timewait_info = rb_entry(node, struct cm_timewait_info, remote_id_node); if (be32_lt(remote_id, timewait_info->work.remote_id)) node = node->rb_left; else if (be32_gt(remote_id, timewait_info->work.remote_id)) node = node->rb_right; 
else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_left; else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid)) node = node->rb_right; else { res = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); break; } } spin_unlock_irq(&cm.lock); return res; } static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info *timewait_info) { struct rb_node **link = &cm.remote_qp_table.rb_node; struct rb_node *parent = NULL; struct cm_timewait_info *cur_timewait_info; __be64 remote_ca_guid = timewait_info->remote_ca_guid; __be32 remote_qpn = timewait_info->remote_qpn; while (*link) { parent = *link; cur_timewait_info = rb_entry(parent, struct cm_timewait_info, remote_qp_node); if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_left; else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn)) link = &(*link)->rb_right; else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_left; else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid)) link = &(*link)->rb_right; else return cur_timewait_info; } timewait_info->inserted_remote_qp = 1; rb_link_node(&timewait_info->remote_qp_node, parent, link); rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table); return NULL; } static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private *cm_id_priv) { struct rb_node **link = &cm.remote_sidr_table.rb_node; struct rb_node *parent = NULL; struct cm_id_private *cur_cm_id_priv; __be32 remote_id = cm_id_priv->id.remote_id; while (*link) { parent = *link; cur_cm_id_priv = rb_entry(parent, struct cm_id_private, sidr_id_node); if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_left; else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id)) link = &(*link)->rb_right; else { if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid) link = &(*link)->rb_left; else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid) link = &(*link)->rb_right; else return cur_cm_id_priv; } } rb_link_node(&cm_id_priv->sidr_id_node, parent, link); rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); return NULL; } static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; u32 id; int ret; cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL); if (!cm_id_priv) return ERR_PTR(-ENOMEM); cm_id_priv->id.state = IB_CM_IDLE; cm_id_priv->id.device = device; cm_id_priv->id.cm_handler = cm_handler; cm_id_priv->id.context = context; cm_id_priv->id.remote_cm_qpn = 1; RB_CLEAR_NODE(&cm_id_priv->service_node); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); atomic_set(&cm_id_priv->work_count, -1); refcount_set(&cm_id_priv->refcount, 1); ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b, &cm.local_id_next, GFP_KERNEL); if (ret < 0) goto error; cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; return cm_id_priv; error: kfree(cm_id_priv); return ERR_PTR(ret); } /* * Make the ID visible to the MAD handlers and other threads that use the * xarray. 
*/ static void cm_finalize_id(struct cm_id_private *cm_id_priv) { xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id), cm_id_priv, GFP_ATOMIC); } struct ib_cm_id *ib_create_cm_id(struct ib_device *device, ib_cm_handler cm_handler, void *context) { struct cm_id_private *cm_id_priv; cm_id_priv = cm_alloc_id_priv(device, cm_handler, context); if (IS_ERR(cm_id_priv)) return ERR_CAST(cm_id_priv); cm_finalize_id(cm_id_priv); return &cm_id_priv->id; } EXPORT_SYMBOL(ib_create_cm_id); static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv) { struct cm_work *work; if (list_empty(&cm_id_priv->work_list)) return NULL; work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); list_del(&work->list); return work; } static void cm_free_work(struct cm_work *work) { if (work->mad_recv_wc) ib_free_recv_mad(work->mad_recv_wc); kfree(work); } static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv, struct cm_work *work) __releases(&cm_id_priv->lock) { bool immediate; /* * To deliver the event to the user callback we have to drop the * spinlock; however, we need to ensure that the user callback is single * threaded and receives events in temporal order. If there are * already events being processed, thread new events onto a list; * the thread currently processing will pick them up. */ immediate = atomic_inc_and_test(&cm_id_priv->work_count); if (!immediate) { list_add_tail(&work->list, &cm_id_priv->work_list); /* * This routine always consumes the incoming reference. Once queued * on the work_list, a reference is already held by the thread * currently running cm_process_work(), so this one is not * needed. */ cm_deref_id(cm_id_priv); } spin_unlock_irq(&cm_id_priv->lock); if (immediate) cm_process_work(cm_id_priv, work); } static inline int cm_convert_to_ms(int iba_time) { /* approximate conversion to ms from 4.096us x 2^iba_time */ return 1 << max(iba_time - 8, 0); } /* * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time * Because of how ack_timeout is stored, adding one doubles the timeout. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and * increment it (round up) only if the other is within 50%.
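* For example, with ca_ack_delay = 5 and packet_life_time = 10 the max is * life_time + 1 = 11 and ack_delay is not within one step of it, so 11 is * returned; with ca_ack_delay = 10 the two are within one step, so the * result is rounded up to 12.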
*/ static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time) { int ack_timeout = packet_life_time + 1; if (ack_timeout >= ca_ack_delay) ack_timeout += (ca_ack_delay >= (ack_timeout - 1)); else ack_timeout = ca_ack_delay + (ack_timeout >= (ca_ack_delay - 1)); return min(31, ack_timeout); } static void cm_remove_remote(struct cm_id_private *cm_id_priv) { struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info; if (timewait_info->inserted_remote_id) { rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table); timewait_info->inserted_remote_id = 0; } if (timewait_info->inserted_remote_qp) { rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table); timewait_info->inserted_remote_qp = 0; } } static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id) { struct cm_timewait_info *timewait_info; timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL); if (!timewait_info) return ERR_PTR(-ENOMEM); timewait_info->work.local_id = local_id; INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; return timewait_info; } static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; unsigned long flags; struct cm_device *cm_dev; lockdep_assert_held(&cm_id_priv->lock); cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client); if (!cm_dev) return; spin_lock_irqsave(&cm.lock, flags); cm_remove_remote(cm_id_priv); list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list); spin_unlock_irqrestore(&cm.lock, flags); /* * The cm_id could be destroyed by the user before timewait completes. * To protect against this, the cm_id is looked up again once timewait * completes, before the user is notified that timewait has exited. */ cm_id_priv->id.state = IB_CM_TIMEWAIT; wait_time = cm_convert_to_ms(cm_id_priv->av.timeout); /* Check if the device started its remove_one */ spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, msecs_to_jiffies(wait_time)); spin_unlock_irqrestore(&cm.lock, flags); /* * The timewait_info is converted into a work and gets freed during * cm_free_work() in cm_timewait_handler(). */ BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0); cm_id_priv->timewait_info = NULL; } static void cm_reset_to_idle(struct cm_id_private *cm_id_priv) { unsigned long flags; lockdep_assert_held(&cm_id_priv->lock); cm_id_priv->id.state = IB_CM_IDLE; if (cm_id_priv->timewait_info) { spin_lock_irqsave(&cm.lock, flags); cm_remove_remote(cm_id_priv); spin_unlock_irqrestore(&cm.lock, flags); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } } static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id, enum ib_cm_state old_state) { struct cm_id_private *cm_id_priv; cm_id_priv = container_of(cm_id, struct cm_id_private, id); pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__, cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount)); } static void cm_destroy_id(struct ib_cm_id *cm_id, int err) { struct cm_id_private *cm_id_priv; enum ib_cm_state old_state; struct cm_work *work; int ret; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irq(&cm_id_priv->lock); old_state = cm_id->state; retest: switch (cm_id->state) { case IB_CM_LISTEN: spin_lock(&cm.lock); if (--cm_id_priv->listen_sharecount > 0) { /* The id is still shared.
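* Every extra sharer took one listen_sharecount and one refcount in * cm_insert_listen(), so drop only this caller's contribution and leave * the node in the listen_service_table.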
*/ WARN_ON(refcount_read(&cm_id_priv->refcount) == 1); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); return; } cm_id->state = IB_CM_IDLE; rb_erase(&cm_id_priv->service_node, &cm.listen_service_table); RB_CLEAR_NODE(&cm_id_priv->service_node); spin_unlock(&cm.lock); break; case IB_CM_SIDR_REQ_SENT: cm_id->state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_SIDR_REQ_RCVD: cm_send_sidr_rep_locked(cm_id_priv, &(struct ib_cm_sidr_rep_param){ .status = IB_SIDR_REJECT }); /* cm_send_sidr_rep_locked will not move to IDLE if it fails */ cm_id->state = IB_CM_IDLE; break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: ib_cancel_mad(cm_id_priv->msg); cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT, &cm_id_priv->id.device->node_guid, sizeof(cm_id_priv->id.device->node_guid), NULL, 0); break; case IB_CM_REQ_RCVD: if (err == -ENOMEM) { /* Do not reject to allow future retries. */ cm_reset_to_idle(cm_id_priv); } else { cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); } break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); goto retest; case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, NULL, 0); break; case IB_CM_ESTABLISHED: if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) { cm_id->state = IB_CM_IDLE; break; } cm_issue_dreq(cm_id_priv); cm_enter_timewait(cm_id_priv); goto retest; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->msg); cm_enter_timewait(cm_id_priv); goto retest; case IB_CM_DREQ_RCVD: cm_send_drep_locked(cm_id_priv, NULL, 0); WARN_ON(cm_id->state != IB_CM_TIMEWAIT); goto retest; case IB_CM_TIMEWAIT: /* * The cm_acquire_id in cm_timewait_handler will stop working * once we do xa_erase below, so just move to idle here for * consistency. 
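* Any timewait work still queued should then fail its lookup and bail * out rather than touch a freed ID.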
*/ cm_id->state = IB_CM_IDLE; break; case IB_CM_IDLE: break; } WARN_ON(cm_id->state != IB_CM_IDLE); spin_lock(&cm.lock); /* Required for cleanup paths related to cm_req_handler() */ if (cm_id_priv->timewait_info) { cm_remove_remote(cm_id_priv); kfree(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; } WARN_ON(cm_id_priv->listen_sharecount); WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node)); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id)); cm_deref_id(cm_id_priv); do { ret = wait_for_completion_timeout(&cm_id_priv->comp, msecs_to_jiffies( CM_DESTROY_ID_WAIT_TIMEOUT)); if (!ret) /* timeout happened */ cm_destroy_id_wait_timeout(cm_id, old_state); } while (!ret); while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_free_work(work); cm_destroy_av(&cm_id_priv->av); cm_destroy_av(&cm_id_priv->alt_av); kfree(cm_id_priv->private_data); kfree_rcu(cm_id_priv, rcu); } void ib_destroy_cm_id(struct ib_cm_id *cm_id) { cm_destroy_id(cm_id, 0); } EXPORT_SYMBOL(ib_destroy_cm_id); static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id) { if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID && (service_id != IB_CM_ASSIGN_SERVICE_ID)) return -EINVAL; if (service_id == IB_CM_ASSIGN_SERVICE_ID) cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++); else cm_id_priv->id.service_id = service_id; return 0; } /** * ib_cm_listen - Initiates listening on the specified service ID for * connection and service ID resolution requests. * @cm_id: Connection identifier associated with the listen request. * @service_id: Service identifier matched against incoming connection * and service ID resolution requests. The service ID should be specified * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will * assign a service ID to the caller. */ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->id.state != IB_CM_IDLE) { ret = -EINVAL; goto out; } ret = cm_init_listen(cm_id_priv, service_id); if (ret) goto out; if (!cm_insert_listen(cm_id_priv, NULL)) { ret = -EBUSY; goto out; } cm_id_priv->id.state = IB_CM_LISTEN; ret = 0; out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_cm_listen); /** * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on * the given service ID. * * If there's an existing ID listening on that same device and service ID, * return it. * * @device: Device associated with the cm_id. All related communication will * be associated with the specified device. * @cm_handler: Callback invoked to notify the user of CM events. * @service_id: Service identifier matched against incoming connection * and service ID resolution requests. The service ID should be specified * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will * assign a service ID to the caller. * * Callers should call ib_destroy_cm_id when done with the listener ID.
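* * A minimal usage sketch (error handling trimmed; my_handler is an * illustrative caller-supplied ib_cm_handler, not part of this API): * *	id = ib_cm_insert_listen(device, my_handler, cpu_to_be64(0x2000)); *	if (IS_ERR(id)) *		return PTR_ERR(id); *	... *	ib_destroy_cm_id(id);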
*/ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device, ib_cm_handler cm_handler, __be64 service_id) { struct cm_id_private *listen_id_priv; struct cm_id_private *cm_id_priv; int err = 0; /* Create an ID in advance, since the creation may sleep */ cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL); if (IS_ERR(cm_id_priv)) return ERR_CAST(cm_id_priv); err = cm_init_listen(cm_id_priv, service_id); if (err) { ib_destroy_cm_id(&cm_id_priv->id); return ERR_PTR(err); } spin_lock_irq(&cm_id_priv->lock); listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler); if (listen_id_priv != cm_id_priv) { spin_unlock_irq(&cm_id_priv->lock); ib_destroy_cm_id(&cm_id_priv->id); if (!listen_id_priv) return ERR_PTR(-EINVAL); return &listen_id_priv->id; } cm_id_priv->id.state = IB_CM_LISTEN; spin_unlock_irq(&cm_id_priv->lock); /* * A listen ID does not need to be in the xarray since it does not * receive mads, is not placed in the remote_id or remote_qpn rbtree, * and does not enter timewait. */ return &cm_id_priv->id; } EXPORT_SYMBOL(ib_cm_insert_listen); static __be64 cm_form_tid(struct cm_id_private *cm_id_priv) { u64 hi_tid = 0, low_tid; lockdep_assert_held(&cm_id_priv->lock); low_tid = (u64)cm_id_priv->id.local_id; if (!cm_id_priv->av.port) return cpu_to_be64(low_tid); spin_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); if (cm_id_priv->av.port->mad_agent) hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32; spin_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock); return cpu_to_be64(hi_tid | low_tid); } static void cm_format_mad_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid) { hdr->base_version = IB_MGMT_BASE_VERSION; hdr->mgmt_class = IB_MGMT_CLASS_CM; hdr->class_version = IB_CM_CLASS_VERSION; hdr->method = IB_MGMT_METHOD_SEND; hdr->attr_id = attr_id; hdr->tid = tid; } static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id, __be64 tid, u32 attr_mod) { cm_format_mad_hdr(hdr, attr_id, tid); hdr->attr_mod = cpu_to_be32(attr_mod); } static void cm_format_req(struct cm_req_msg *req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_req_param *param) { struct sa_path_rec *pri_path = param->primary_path; struct sa_path_rec *alt_path = param->alternate_path; bool pri_ext = false; __be16 lid; if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA) pri_ext = opa_is_extended_lid(pri_path->opa.dlid, pri_path->opa.slid); cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID, cm_form_tid(cm_id_priv), param->ece.attr_mod); IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id)); IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg, be64_to_cpu(cm_id_priv->id.device->node_guid)); IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num); IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth); IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg, param->remote_cm_response_timeout); cm_req_set_qp_type(req_msg, param->qp_type); IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control); IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn); IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg, param->local_cm_response_timeout); IBA_SET(CM_REQ_PARTITION_KEY, req_msg, be16_to_cpu(param->primary_path->pkey)); IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg, param->primary_path->mtu); IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries); if (param->qp_type != IB_QPT_XRC_INI) { IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg, param->responder_resources); IBA_SET(CM_REQ_RETRY_COUNT, 
req_msg, param->retry_count); IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg, param->rnr_retry_count); IBA_SET(CM_REQ_SRQ, req_msg, param->srq); } *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) = pri_path->sgid; *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) = pri_path->dgid; if (pri_ext) { IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid)); IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid)); } if (pri_path->hop_limit <= 1) { IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(pri_ext ? 0 : htons(ntohl(sa_path_get_slid( pri_path))))); IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, be16_to_cpu(pri_ext ? 0 : htons(ntohl(sa_path_get_dlid( pri_path))))); } else { if (param->primary_path_inbound) { lid = param->primary_path_inbound->ib.dlid; IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(lid)); } else IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); /* Work-around until there's a way to obtain remote LID info */ IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); } IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg, be32_to_cpu(pri_path->flow_label)); IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate); IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class); IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit); IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl); IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg, (pri_path->hop_limit <= 1)); IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, pri_path->packet_life_time)); if (alt_path) { bool alt_ext = false; if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA) alt_ext = opa_is_extended_lid(alt_path->opa.dlid, alt_path->opa.slid); *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) = alt_path->sgid; *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) = alt_path->dgid; if (alt_ext) { IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid)); IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) ->global.interface_id = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid)); } if (alt_path->hop_limit <= 1) { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu( alt_ext ? 0 : htons(ntohl(sa_path_get_slid( alt_path))))); IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, be16_to_cpu( alt_ext ? 
0 : htons(ntohl(sa_path_get_dlid( alt_path))))); } else { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, be16_to_cpu(IB_LID_PERMISSIVE)); } IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg, be32_to_cpu(alt_path->flow_label)); IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate); IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg, alt_path->traffic_class); IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg, alt_path->hop_limit); IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl); IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg, (alt_path->hop_limit <= 1)); IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg, cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay, alt_path->packet_life_time)); } IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data, param->private_data_len); } static int cm_validate_req_param(struct ib_cm_req_param *param) { if (!param->primary_path) return -EINVAL; if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && param->qp_type != IB_QPT_XRC_INI) return -EINVAL; if (param->private_data && param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) return -EINVAL; if (param->alternate_path && (param->alternate_path->pkey != param->primary_path->pkey || param->alternate_path->mtu != param->primary_path->mtu)) return -EINVAL; return 0; } int ib_send_cm_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param) { struct cm_av av = {}, alt_av = {}; struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_req_msg *req_msg; unsigned long flags; int ret; ret = cm_validate_req_param(param); if (ret) return ret; /* Verify that we're not in timewait. 
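* An IDLE cm_id must not still own a timewait_info: cm_reset_to_idle() * and cm_enter_timewait() both clear the pointer, so the WARN_ON below * firing would indicate a state machine bug.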
*/ cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); return -EINVAL; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; return ret; } ret = cm_init_av_by_path(param->primary_path, param->ppath_sgid_attr, &av); if (ret) return ret; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { cm_destroy_av(&av); return ret; } } cm_id->service_id = param->service_id; cm_id_priv->timeout_ms = cm_convert_to_ms( param->primary_path->packet_life_time) * 2 + cm_convert_to_ms( param->remote_cm_response_timeout); cm_id_priv->max_cm_retries = param->max_cm_retries; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->retry_count = param->retry_count; cm_id_priv->path_mtu = param->primary_path->mtu; cm_id_priv->pkey = param->primary_path->pkey; cm_id_priv->qp_type = param->qp_type; spin_lock_irqsave(&cm_id_priv->lock, flags); cm_move_av_from_path(&cm_id_priv->av, &av); if (param->primary_path_outbound) cm_id_priv->av.dlid_datapath = be16_to_cpu(param->primary_path_outbound->ib.dlid); if (param->alternate_path) cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; } req_msg = (struct cm_req_msg *)msg->mad; cm_format_req(req_msg, cm_id_priv, param); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); trace_icm_send_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto out_free; BUG_ON(cm_id->state != IB_CM_IDLE); cm_id->state = IB_CM_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_req); static int cm_issue_rej(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc, enum ib_cm_rej_reason reason, enum cm_msg_response msg_rejected, void *ari, u8 ari_length) { struct ib_mad_send_buf *msg = NULL; struct cm_rej_msg *rej_msg, *rcv_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg); if (ret) return ret; /* We just need common CM header information. Cast to any message. 
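* The comm ID words occupy the same offsets in every CM message (or are * reserved there), so reading them through a cm_rej_msg view of the * received MAD is safe regardless of the actual attribute ID.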
*/ rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; rej_msg = (struct cm_rej_msg *) msg->mad; cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid); IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg)); IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected); IBA_SET(CM_REJ_REASON, rej_msg, reason); if (ari && ari_length) { IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); } trace_icm_issue_rej( IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg), IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) { return ((cpu_to_be16( IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) || (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg)))); } static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, struct sa_path_rec *path, union ib_gid *gid) { if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num)) path->rec_type = SA_PATH_REC_TYPE_OPA; else path->rec_type = SA_PATH_REC_TYPE_IB; } static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, struct sa_path_rec *alt_path, struct ib_wc *wc) { u32 lid; if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(primary_path, wc->slid); sa_path_set_slid(primary_path, IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)); sa_path_set_dlid(primary_path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)); sa_path_set_slid(primary_path, lid); } if (!cm_req_has_alt_path(req_msg)) return; if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(alt_path, IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg)); sa_path_set_slid(alt_path, IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg)); sa_path_set_dlid(alt_path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg)); sa_path_set_slid(alt_path, lid); } } static void cm_format_paths_from_req(struct cm_req_msg *req_msg, struct sa_path_rec *primary_path, struct sa_path_rec *alt_path, struct ib_wc *wc) { primary_path->dgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); primary_path->sgid = *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg); primary_path->flow_label = cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg)); primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg); primary_path->traffic_class = IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg); primary_path->reversible = 1; primary_path->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg); primary_path->mtu_selector = IB_SA_EQ; primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); primary_path->rate_selector = IB_SA_EQ; primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg); primary_path->packet_life_time_selector = IB_SA_EQ; primary_path->packet_life_time = IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg); primary_path->packet_life_time -= (primary_path->packet_life_time > 0); primary_path->service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); if 
(sa_path_is_roce(primary_path)) primary_path->roce.route_resolved = false; if (cm_req_has_alt_path(req_msg)) { alt_path->dgid = *IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg); alt_path->sgid = *IBA_GET_MEM_PTR( CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg); alt_path->flow_label = cpu_to_be32( IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg)); alt_path->hop_limit = IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg); alt_path->traffic_class = IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg); alt_path->reversible = 1; alt_path->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg); alt_path->mtu_selector = IB_SA_EQ; alt_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); alt_path->rate_selector = IB_SA_EQ; alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg); alt_path->packet_life_time_selector = IB_SA_EQ; alt_path->packet_life_time = IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg); alt_path->packet_life_time -= (alt_path->packet_life_time > 0); alt_path->service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); if (sa_path_is_roce(alt_path)) alt_path->roce.route_resolved = false; } cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); } static u16 cm_get_bth_pkey(struct cm_work *work) { struct ib_device *ib_dev = work->port->cm_dev->ib_device; u32 port_num = work->port->port_num; u16 pkey_index = work->mad_recv_wc->wc->pkey_index; u16 pkey; int ret; ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey); if (ret) { dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n", port_num, pkey_index, ret); return 0; } return pkey; } /** * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID * ULPs (such as IPoIB) do not understand OPA GIDs and will * reject them as the local_gid will not match the sgid. Therefore, * change the pathrec's SGID to an IB SGID. 
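* The replacement SGID is read from index 0 of the port's GID table.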
* * @work: Work completion * @path: Path record */ static void cm_opa_to_ib_sgid(struct cm_work *work, struct sa_path_rec *path) { struct ib_device *dev = work->port->cm_dev->ib_device; u32 port_num = work->port->port_num; if (rdma_cap_opa_ah(dev, port_num) && (ib_is_opa_gid(&path->sgid))) { union ib_gid sgid; if (rdma_query_gid(dev, port_num, 0, &sgid)) { dev_warn(&dev->dev, "Error updating sgid in CM request\n"); return; } path->sgid = sgid; } } static void cm_format_req_event(struct cm_work *work, struct cm_id_private *cm_id_priv, struct ib_cm_id *listen_id) { struct cm_req_msg *req_msg; struct ib_cm_req_event_param *param; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.req_rcvd; param->listen_id = listen_id; param->bth_pkey = cm_get_bth_pkey(work); param->port = cm_id_priv->av.port->port_num; param->primary_path = &work->path[0]; cm_opa_to_ib_sgid(work, param->primary_path); if (cm_req_has_alt_path(req_msg)) { param->alternate_path = &work->path[1]; cm_opa_to_ib_sgid(work, param->alternate_path); } else { param->alternate_path = NULL; } param->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg); param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg); param->qp_type = cm_req_get_qp_type(req_msg); param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg); param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); param->local_cm_response_timeout = IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg); param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg); param->remote_cm_response_timeout = IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg); param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); param->srq = IBA_GET(CM_REQ_SRQ, req_msg); param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg); param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg); } static void cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) { int ret; /* We will typically only have the current event to report. 
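* work_count starts at -1, so the atomic_inc_and_test() in * cm_queue_work_unlock() hands the first event straight to this function; * the loop below then drains any events that were queued behind it while * the handler ran.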
*/ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) { spin_lock_irq(&cm_id_priv->lock); work = cm_dequeue_work(cm_id_priv); spin_unlock_irq(&cm_id_priv->lock); if (!work) return; ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); } cm_deref_id(cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); } static void cm_format_mra(struct cm_mra_msg *mra_msg, struct cm_id_private *cm_id_priv, enum cm_msg_response msg_mraed, u8 service_timeout, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed); IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout); if (private_data && private_data_len) IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data, private_data_len); } static void cm_format_rej(struct cm_rej_msg *rej_msg, struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len, enum ib_cm_state state) { lockdep_assert_held(&cm_id_priv->lock); cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.remote_id)); switch (state) { case IB_CM_REQ_RCVD: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_MRA_REQ_SENT: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); break; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP); break; default: IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_OTHER); break; } IBA_SET(CM_REJ_REASON, rej_msg, reason); if (ari && ari_length) { IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); } if (private_data && private_data_len) IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data, private_data_len); } static void cm_dup_req_handler(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg = NULL; int ret; atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]); /* Quick state check to discard duplicate REQs. 
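* While still in IB_CM_REQ_RCVD no MRA or REP has gone out, so there is * nothing to retransmit; drop the duplicate without allocating a * response MAD.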
*/ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state == IB_CM_REQ_RCVD) { spin_unlock_irq(&cm_id_priv->lock); return; } spin_unlock_irq(&cm_id_priv->lock); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); if (ret) return; spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_MRA_REQ_SENT: cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); break; case IB_CM_TIMEWAIT: cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0, IB_CM_TIMEWAIT); break; default: goto unlock; } spin_unlock_irq(&cm_id_priv->lock); trace_icm_send_dup_req(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; return; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_msg(msg); } static struct cm_id_private *cm_match_req(struct cm_work *work, struct cm_id_private *cm_id_priv) { struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; struct cm_timewait_info *timewait_info; struct cm_req_msg *req_msg; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; /* Check for possible duplicate REQ. */ spin_lock_irq(&cm.lock); timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info); if (timewait_info) { cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock_irq(&cm.lock); if (cur_cm_id_priv) { cm_dup_req_handler(work, cur_cm_id_priv); cm_deref_id(cur_cm_id_priv); } return NULL; } /* Check for stale connections. */ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); if (timewait_info) { cm_remove_remote(cm_id_priv); cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, NULL, 0); if (cur_cm_id_priv) { ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); cm_deref_id(cur_cm_id_priv); } return NULL; } /* Find matching listen request. */ listen_cm_id_priv = cm_find_listen( cm_id_priv->id.device, cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg))); if (!listen_cm_id_priv) { cm_remove_remote(cm_id_priv); spin_unlock_irq(&cm.lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ, NULL, 0); return NULL; } spin_unlock_irq(&cm.lock); return listen_cm_id_priv; } /* * Work-around for inter-subnet connections. If the LIDs are permissive, * we need to override the LID/SL data in the REQ with the LID information * in the work completion. 
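* wc->slid is the LID seen on the local subnet (the router's, on an * inter-subnet path) and wc->dlid_path_bits holds the path bits of our * own LID; these are the best substitutes available for the permissive * LIDs carried in the REQ.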
*/ static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) { if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) { if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) { IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, be16_to_cpu(ib_lid_be16(wc->slid))); IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl); } if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, wc->dlid_path_bits); } if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) { if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) { IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, be16_to_cpu(ib_lid_be16(wc->slid))); IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl); } if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg)) == IB_LID_PERMISSIVE) IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, wc->dlid_path_bits); } } static int cm_req_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_req_msg *req_msg; const struct ib_global_route *grh; const struct ib_gid_attr *gid_attr; int ret; req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id_priv)) return PTR_ERR(cm_id_priv); cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); cm_id_priv->id.service_id = cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->timeout_ms = cm_convert_to_ms( IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); cm_id_priv->remote_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); cm_id_priv->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); cm_id_priv->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); if (ret) goto destroy; cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv-> id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); cm_id_priv->timewait_info = NULL; goto destroy; } cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; cm_id_priv->timewait_info->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn; /* * Note that the ID pointer is not in the xarray at this point, * so this set is only visible to the local thread. 
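* cm_alloc_id_priv() left NULL in the xarray slot; the pointer is only * published by cm_finalize_id() below, once the REQ has been matched * against a listener and the ID is fully initialized.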
*/ cm_id_priv->id.state = IB_CM_REQ_RCVD; listen_cm_id_priv = cm_match_req(work, cm_id_priv); if (!listen_cm_id_priv) { trace_icm_no_listener_err(&cm_id_priv->id); cm_id_priv->id.state = IB_CM_IDLE; ret = -EINVAL; goto destroy; } memset(&work->path[0], 0, sizeof(work->path[0])); if (cm_req_has_alt_path(req_msg)) memset(&work->path[1], 0, sizeof(work->path[1])); grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr); gid_attr = grh->sgid_attr; if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) { work->path[0].rec_type = sa_conv_gid_to_pathrec_type(gid_attr->gid_type); } else { cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_path_set_rec_type( work->port->cm_dev->ib_device, work->port->port_num, &work->path[0], IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)); } if (cm_req_has_alt_path(req_msg)) work->path[1].rec_type = work->path[0].rec_type; cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1], work->mad_recv_wc->wc); if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) sa_path_set_dmac(&work->path[0], cm_id_priv->av.ah_attr.roce.dmac); work->path[0].hop_limit = grh->hop_limit; /* This destroy call is needed to pair with cm_init_av_for_response */ cm_destroy_av(&cm_id_priv->av); ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av); if (ret) { int err; err = rdma_query_gid(work->port->cm_dev->ib_device, work->port->port_num, 0, &work->path[0].sgid); if (err) ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, NULL, 0, NULL, 0); else ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); goto rejected; } if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB) cm_id_priv->av.dlid_datapath = IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg); if (cm_req_has_alt_path(req_msg)) { ret = cm_init_av_by_path(&work->path[1], NULL, &cm_id_priv->alt_av); if (ret) { ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_ALT_GID, &work->path[0].sgid, sizeof(work->path[0].sgid), NULL, 0); goto rejected; } } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; cm_id_priv->id.context = listen_cm_id_priv->id.context; cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); /* Now MAD handlers can see the new ID */ spin_lock_irq(&cm_id_priv->lock); cm_finalize_id(cm_id_priv); /* Refcount belongs to the event, pairs with cm_process_work() */ refcount_inc(&cm_id_priv->refcount); cm_queue_work_unlock(cm_id_priv, work); /* * Since this ID was just created and was not made visible to other MAD * handlers until the cm_finalize_id() above we know that the * cm_process_work() will deliver the event and the listen_cm_id * embedded in the event can be derefed here. 
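* The reference dropped here is the lookup reference taken by * cm_find_listen(); the listener's own reference keeps it alive.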
*/ cm_deref_id(listen_cm_id_priv); return 0; rejected: cm_deref_id(listen_cm_id_priv); destroy: ib_destroy_cm_id(&cm_id_priv->id); return ret; } static void cm_format_rep(struct cm_rep_msg *rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_rep_param *param) { cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid, param->ece.attr_mod); IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn); IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg, param->responder_resources); IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg, cm_id_priv->av.port->cm_dev->ack_delay); IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted); IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count); IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg, be64_to_cpu(cm_id_priv->id.device->node_guid)); if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg, param->initiator_depth); IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg, param->flow_control); IBA_SET(CM_REP_SRQ, rep_msg, param->srq); IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num); } else { IBA_SET(CM_REP_SRQ, rep_msg, 1); IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num); } IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id); IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8); IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data, param->private_data_len); } int ib_send_cm_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_rep_msg *rep_msg; unsigned long flags; int ret; if (param->private_data && param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REQ_RCVD && cm_id->state != IB_CM_MRA_REQ_SENT) { trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state); ret = -EINVAL; goto out; } msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REP_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out; } rep_msg = (struct cm_rep_msg *) msg->mad; cm_format_rep(rep_msg, cm_id_priv, param); trace_icm_send_rep(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) goto out_free; cm_id->state = IB_CM_REP_SENT; cm_id_priv->initiator_depth = param->initiator_depth; cm_id_priv->responder_resources = param->responder_resources; cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); WARN_ONCE(param->qp_num & 0xFF000000, "IBTA declares QPN to be 24 bits, but it is 0x%X\n", param->qp_num); cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rep); static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg, be32_to_cpu(cm_id_priv->id.remote_id)); if (private_data && private_data_len) IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data, 
private_data_len); } int ib_send_cm_rtu(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; unsigned long flags; void *data; int ret; if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state != IB_CM_REP_RCVD && cm_id->state != IB_CM_MRA_REP_SENT) { trace_icm_send_cm_rtu_err(cm_id); ret = -EINVAL; goto error; } msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto error; } cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, private_data, private_data_len); trace_icm_send_rtu(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) { spin_unlock_irqrestore(&cm_id_priv->lock, flags); cm_free_msg(msg); kfree(data); return ret; } cm_id->state = IB_CM_ESTABLISHED; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; } EXPORT_SYMBOL(ib_send_cm_rtu); static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) { struct cm_rep_msg *rep_msg; struct ib_cm_rep_event_param *param; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rep_rcvd; param->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg); param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg); param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg); param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg); param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); param->srq = IBA_GET(CM_REP_SRQ, rep_msg); param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16; param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8; param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg); param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg); } static void cm_dup_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; struct ib_mad_send_buf *msg = NULL; int ret; rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg))); if (!cm_id_priv) return; atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]); ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg); if (ret) goto deref; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state == IB_CM_ESTABLISHED) cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT) cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); else goto 
unlock; spin_unlock_irq(&cm_id_priv->lock); trace_icm_send_dup_rep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) goto free; goto deref; unlock: spin_unlock_irq(&cm_id_priv->lock); free: cm_free_msg(msg); deref: cm_deref_id(cm_id_priv); } static int cm_rep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rep_msg *rep_msg; int ret; struct cm_id_private *cur_cm_id_priv; struct cm_timewait_info *timewait_info; rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0); if (!cm_id_priv) { cm_dup_rep_handler(work); trace_icm_remote_no_priv_err( IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); return -EINVAL; } cm_format_rep_event(work, cm_id_priv->qp_type); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: break; default: ret = -EINVAL; trace_icm_rep_unknown_err( IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg), cm_id_priv->id.state); spin_unlock_irq(&cm_id_priv->lock); goto error; } cm_id_priv->timewait_info->work.remote_id = cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); cm_id_priv->timewait_info->remote_ca_guid = cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); spin_lock(&cm.lock); /* Check for duplicate REP. */ if (cm_insert_remote_id(cm_id_priv->timewait_info)) { spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); ret = -EINVAL; trace_icm_insert_failed_err( IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); goto error; } /* Check for a stale connection. */ timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); if (timewait_info) { cm_remove_remote(cm_id_priv); cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); spin_unlock(&cm.lock); spin_unlock_irq(&cm_id_priv->lock); cm_issue_rej(work->port, work->mad_recv_wc, IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, NULL, 0); ret = -EINVAL; trace_icm_staleconn_err( IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); if (cur_cm_id_priv) { ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0); cm_deref_id(cur_cm_id_priv); } goto error; } spin_unlock(&cm.lock); cm_id_priv->id.state = IB_CM_REP_RCVD; cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type); cm_id_priv->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); cm_id_priv->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); cm_id_priv->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); cm_id_priv->av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->av.timeout - 1); cm_id_priv->alt_av.timeout = cm_ack_timeout(cm_id_priv->target_ack_delay, cm_id_priv->alt_av.timeout - 1); ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; error: cm_deref_id(cm_id_priv); return ret; } static int cm_establish_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; /* See comment in cm_establish about lookup. 
*/ cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { spin_unlock_irq(&cm_id_priv->lock); goto out; } ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_rtu_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rtu_msg *rtu_msg; rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)), cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg))); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_REP_SENT && cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { spin_unlock_irq(&cm_id_priv->lock); atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_RTU_COUNTER]); goto out; } cm_id_priv->id.state = IB_CM_ESTABLISHED; ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID, cm_form_tid(cm_id_priv)); IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg, be32_to_cpu(cm_id_priv->remote_qpn)); if (private_data && private_data_len) IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data, private_data_len); } static void cm_issue_dreq(struct cm_id_private *cm_id_priv) { struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return; cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0); trace_icm_send_dreq(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); } int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); struct ib_mad_send_buf *msg; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) return -EINVAL; spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { trace_icm_dreq_skipped(&cm_id_priv->id); ret = -EINVAL; goto unlock; } if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT); if (IS_ERR(msg)) { cm_enter_timewait(cm_id_priv); ret = PTR_ERR(msg); goto unlock; } cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, private_data, private_data_len); trace_icm_send_dreq(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_enter_timewait(cm_id_priv); cm_free_priv_msg(msg); goto unlock; } cm_id_priv->id.state = IB_CM_DREQ_SENT; unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_dreq); static void cm_format_drep(struct cm_drep_msg *drep_msg, struct cm_id_private *cm_id_priv, const void *private_data, u8 private_data_len) { cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid); IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, 
be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, be32_to_cpu(cm_id_priv->id.remote_id)); if (private_data && private_data_len) IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data, private_data_len); } static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, void *private_data, u8 private_data_len) { struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) return -EINVAL; if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) { trace_icm_send_drep_err(&cm_id_priv->id); kfree(private_data); return -EINVAL; } cm_set_private_data(cm_id_priv, private_data, private_data_len); cm_enter_timewait(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, private_data, private_data_len); trace_icm_send_drep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } return 0; } int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; void *data; int ret; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_drep_locked(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_drep); static int cm_issue_drep(struct cm_port *port, struct ib_mad_recv_wc *mad_recv_wc) { struct ib_mad_send_buf *msg = NULL; struct cm_dreq_msg *dreq_msg; struct cm_drep_msg *drep_msg; int ret; ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg); if (ret) return ret; dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; drep_msg = (struct cm_drep_msg *) msg->mad; cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid); IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)); IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); trace_icm_issue_drep( IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); ret = ib_post_send_mad(msg, NULL); if (ret) cm_free_msg(msg); return ret; } static int cm_dreq_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_dreq_msg *dreq_msg; struct ib_mad_send_buf *msg = NULL; dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)), cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg))); if (!cm_id_priv) { atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); cm_issue_drep(work->port, work->mad_recv_wc); trace_icm_no_priv_err( IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); return -EINVAL; } work->cm_event.private_data = IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->local_qpn != cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg))) goto unlock; switch (cm_id_priv->id.state) { case IB_CM_REP_SENT: case IB_CM_DREQ_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) ib_cancel_mad(cm_id_priv->msg); break; case IB_CM_TIMEWAIT: 
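/* * A DREQ retransmitted after our DREP was already sent: count it as a * duplicate and resend the DREP, creating the address handle outside * the spinlock. */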
atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, true); if (IS_ERR(msg)) goto unlock; cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) cm_free_msg(msg); goto deref; case IB_CM_DREQ_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_DREQ_COUNTER]); goto unlock; default: trace_icm_dreq_unknown_err(&cm_id_priv->id); goto unlock; } cm_id_priv->id.state = IB_CM_DREQ_RCVD; cm_id_priv->tid = dreq_msg->hdr.tid; cm_queue_work_unlock(cm_id_priv, work); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_drep_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_drep_msg *drep_msg; drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)), cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg))); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_DREQ_SENT && cm_id_priv->id.state != IB_CM_DREQ_RCVD) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_enter_timewait(cm_id_priv); ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_send_rej_locked(struct cm_id_private *cm_id_priv, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { enum ib_cm_state state = cm_id_priv->id.state; struct ib_mad_send_buf *msg; int ret; lockdep_assert_held(&cm_id_priv->lock); if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) || (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) return -EINVAL; trace_icm_send_rej(&cm_id_priv->id, reason); switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_reset_to_idle(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len, state); break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_enter_timewait(cm_id_priv); msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, ari, ari_length, private_data, private_data_len, state); break; default: trace_icm_send_unknown_rej_err(&cm_id_priv->id); return -EINVAL; } ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } return 0; } int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason, void *ari, u8 ari_length, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length, private_data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_rej); static void cm_format_rej_event(struct cm_work *work) { struct cm_rej_msg *rej_msg; struct ib_cm_rej_event_param *param; rej_msg = 
(struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.rej_rcvd; param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg); param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg); param->reason = IBA_GET(CM_REJ_REASON, rej_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg); } static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) { struct cm_id_private *cm_id_priv; __be32 remote_id; remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg)); if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) { cm_id_priv = cm_find_remote_id( *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)), remote_id); } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) == CM_MSG_RESPONSE_REQ) cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), 0); else cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)), remote_id); return cm_id_priv; } static int cm_rej_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_rej_msg *rej_msg; rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_rejected_id(rej_msg); if (!cm_id_priv) return -EINVAL; cm_format_rej_event(work); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: ib_cancel_mad(cm_id_priv->msg); fallthrough; case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN) cm_enter_timewait(cm_id_priv); else cm_reset_to_idle(cm_id_priv); break; case IB_CM_DREQ_SENT: ib_cancel_mad(cm_id_priv->msg); fallthrough; case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: cm_enter_timewait(cm_id_priv); break; case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT || cm_id_priv->id.lap_state == IB_CM_LAP_SENT) { if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT) ib_cancel_mad(cm_id_priv->msg); cm_enter_timewait(cm_id_priv); break; } fallthrough; default: trace_icm_rej_unknown_err(&cm_id_priv->id); spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } int ib_send_cm_mra(struct ib_cm_id *cm_id, u8 service_timeout, const void *private_data, u8 private_data_len) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; enum ib_cm_state cm_state; enum ib_cm_lap_state lap_state; enum cm_msg_response msg_response; void *data; unsigned long flags; int ret; if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE) return -EINVAL; data = cm_copy_private_data(private_data, private_data_len); if (IS_ERR(data)) return PTR_ERR(data); cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: cm_state = IB_CM_MRA_REQ_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REQ; break; case IB_CM_REP_RCVD: cm_state = IB_CM_MRA_REP_SENT; lap_state = cm_id->lap_state; msg_response = CM_MSG_RESPONSE_REP; break; case IB_CM_ESTABLISHED: if (cm_id->lap_state == IB_CM_LAP_RCVD) { cm_state = cm_id->state; lap_state = IB_CM_MRA_LAP_SENT; msg_response = CM_MSG_RESPONSE_OTHER; break; } fallthrough; default: trace_icm_send_mra_unknown_err(&cm_id_priv->id); ret = -EINVAL; goto error_unlock; } if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) { msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto error_unlock; } 
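/*
 * Editor's illustration (hypothetical fragment, not part of this file): a
 * passive side that cannot answer a REQ right away can stretch the
 * initiator's timeout by sending an MRA from its cm_handler. The low five
 * bits of service_timeout carry the IBTA encoding (roughly 4.096 us << n),
 * and OR-ing in IB_CM_MRA_FLAG_DELAY asks the CM to send the MRA only if a
 * duplicate message arrives:
 *
 *	case IB_CM_REQ_RECEIVED:
 *		// ~4 s grace period (encoded value 20) while the REP is
 *		// being prepared asynchronously.
 *		ret = ib_send_cm_mra(cm_id, 20, NULL, 0);
 *		break;
 */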
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, msg_response, service_timeout, private_data, private_data_len); trace_icm_send_mra(cm_id); ret = ib_post_send_mad(msg, NULL); if (ret) goto error_free_msg; } cm_id->state = cm_state; cm_id->lap_state = lap_state; cm_id_priv->service_timeout = service_timeout; cm_set_private_data(cm_id_priv, data, private_data_len); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; error_free_msg: cm_free_msg(msg); error_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); kfree(data); return ret; } EXPORT_SYMBOL(ib_send_cm_mra); static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg) { switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) { case CM_MSG_RESPONSE_REQ: return cm_acquire_id( cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), 0); case CM_MSG_RESPONSE_REP: case CM_MSG_RESPONSE_OTHER: return cm_acquire_id( cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)), cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg))); default: return NULL; } } static int cm_mra_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_mra_msg *mra_msg; int timeout; mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_mraed_id(mra_msg); if (!cm_id_priv) return -EINVAL; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg); work->cm_event.param.mra_rcvd.service_timeout = IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg); timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) + cm_convert_to_ms(cm_id_priv->av.timeout); spin_lock_irq(&cm_id_priv->lock); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_REQ || ib_modify_mad(cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; break; case IB_CM_REP_SENT: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_REP || ib_modify_mad(cm_id_priv->msg, timeout)) goto out; cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; break; case IB_CM_ESTABLISHED: if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) != CM_MSG_RESPONSE_OTHER || cm_id_priv->id.lap_state != IB_CM_LAP_SENT || ib_modify_mad(cm_id_priv->msg, timeout)) { if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) atomic_long_inc( &work->port->counters[CM_RECV_DUPLICATES] [CM_MRA_COUNTER]); goto out; } cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; break; case IB_CM_MRA_REQ_RCVD: case IB_CM_MRA_REP_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_MRA_COUNTER]); fallthrough; default: trace_icm_mra_unknown_err(&cm_id_priv->id); goto out; } cm_id_priv->msg->context[1] = (void *) (unsigned long) cm_id_priv->id.state; cm_queue_work_unlock(cm_id_priv, work); return 0; out: spin_unlock_irq(&cm_id_priv->lock); cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg, struct sa_path_rec *path) { u32 lid; if (path->rec_type != SA_PATH_REC_TYPE_OPA) { sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID, lap_msg)); sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID, lap_msg)); } else { lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg)); sa_path_set_dlid(path, lid); lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR( CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg)); sa_path_set_slid(path, lid); } } static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv, struct sa_path_rec *path, struct cm_lap_msg *lap_msg) { path->dgid = 
*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg); path->sgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg); path->flow_label = cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg)); path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg); path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg); path->reversible = 1; path->pkey = cm_id_priv->pkey; path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg); path->mtu_selector = IB_SA_EQ; path->mtu = cm_id_priv->path_mtu; path->rate_selector = IB_SA_EQ; path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg); path->packet_life_time_selector = IB_SA_EQ; path->packet_life_time = IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg); path->packet_life_time -= (path->packet_life_time > 0); cm_format_path_lid_from_lap(lap_msg, path); } static int cm_lap_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv; struct cm_lap_msg *lap_msg; struct ib_cm_lap_event_param *param; struct ib_mad_send_buf *msg = NULL; struct rdma_ah_attr ah_attr; struct cm_av alt_av = {}; int ret; /* Currently Alternate path messages are not supported for * RoCE link layer. */ if (rdma_protocol_roce(work->port->cm_dev->ib_device, work->port->port_num)) return -EINVAL; /* todo: verify LAP request and send reject APR if invalid. */ lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)), cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg))); if (!cm_id_priv) return -EINVAL; param = &work->cm_event.param.lap_rcvd; memset(&work->path[0], 0, sizeof(work->path[1])); cm_path_set_rec_type(work->port->cm_dev->ib_device, work->port->port_num, &work->path[0], IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg)); param->alternate_path = &work->path[0]; cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg); ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device, work->port->port_num, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &ah_attr); if (ret) goto deref; ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { rdma_destroy_ah_attr(&ah_attr); goto deref; } spin_lock_irq(&cm_id_priv->lock); cm_init_av_for_lap(work->port, work->mad_recv_wc->wc, &ah_attr, &cm_id_priv->av); cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av); if (cm_id_priv->id.state != IB_CM_ESTABLISHED) goto unlock; switch (cm_id_priv->id.lap_state) { case IB_CM_LAP_UNINIT: case IB_CM_LAP_IDLE: break; case IB_CM_MRA_LAP_SENT: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_LAP_COUNTER]); msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc, true); if (IS_ERR(msg)) goto unlock; cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, CM_MSG_RESPONSE_OTHER, cm_id_priv->service_timeout, cm_id_priv->private_data, cm_id_priv->private_data_len); spin_unlock_irq(&cm_id_priv->lock); if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) || ib_post_send_mad(msg, NULL)) cm_free_msg(msg); goto deref; case IB_CM_LAP_RCVD: atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_LAP_COUNTER]); goto unlock; default: goto unlock; } cm_id_priv->id.lap_state = IB_CM_LAP_RCVD; cm_id_priv->tid = lap_msg->hdr.tid; cm_queue_work_unlock(cm_id_priv, work); return 0; unlock: spin_unlock_irq(&cm_id_priv->lock); deref: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_apr_handler(struct cm_work *work) { struct cm_id_private 
*cm_id_priv; struct cm_apr_msg *apr_msg; /* Currently Alternate path messages are not supported for * RoCE link layer. */ if (rdma_protocol_roce(work->port->cm_dev->ib_device, work->port->port_num)) return -EINVAL; apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; cm_id_priv = cm_acquire_id( cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)), cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg))); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. */ work->cm_event.param.apr_rcvd.ap_status = IBA_GET(CM_APR_AR_STATUS, apr_msg); work->cm_event.param.apr_rcvd.apr_info = IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg); work->cm_event.param.apr_rcvd.info_len = IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg); work->cm_event.private_data = IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg); spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_ESTABLISHED || (cm_id_priv->id.lap_state != IB_CM_LAP_SENT && cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; ib_cancel_mad(cm_id_priv->msg); cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static int cm_timewait_handler(struct cm_work *work) { struct cm_timewait_info *timewait_info; struct cm_id_private *cm_id_priv; timewait_info = container_of(work, struct cm_timewait_info, work); spin_lock_irq(&cm.lock); list_del(&timewait_info->list); spin_unlock_irq(&cm.lock); cm_id_priv = cm_acquire_id(timewait_info->work.local_id, timewait_info->work.remote_id); if (!cm_id_priv) return -EINVAL; spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_TIMEWAIT || cm_id_priv->remote_qpn != timewait_info->remote_qpn) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; cm_queue_work_unlock(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_req_param *param) { cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID, cm_form_tid(cm_id_priv)); IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg, be32_to_cpu(cm_id_priv->id.local_id)); IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg, be16_to_cpu(param->path->pkey)); IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg, be64_to_cpu(param->service_id)); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg, param->private_data, param->private_data_len); } int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, struct ib_cm_sidr_req_param *param) { struct cm_id_private *cm_id_priv; struct ib_mad_send_buf *msg; struct cm_av av = {}; unsigned long flags; int ret; if (!param->path || (param->private_data && param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE)) return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); ret = cm_init_av_by_path(param->path, param->sgid_attr, &av); if (ret) return ret; spin_lock_irqsave(&cm_id_priv->lock, flags); cm_move_av_from_path(&cm_id_priv->av, &av); cm_id->service_id = param->service_id; cm_id_priv->timeout_ms = param->timeout_ms; cm_id_priv->max_cm_retries = param->max_cm_retries; if (cm_id->state != IB_CM_IDLE) { ret = -EINVAL; goto out_unlock; } msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT); if (IS_ERR(msg)) { ret = PTR_ERR(msg); goto out_unlock; } cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv, param); trace_icm_send_sidr_req(&cm_id_priv->id); 
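/*
 * Editor's illustration (hypothetical names): SIDR resolves a service ID to
 * a QPN without establishing a connection. A client fills in
 * ib_cm_sidr_req_param with a previously resolved path record and fires the
 * request on a fresh cm_id; the reply arrives as IB_CM_SIDR_REP_RECEIVED:
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,	// from an SA path query
 *		.service_id	= svc_id,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	struct ib_cm_id *id = ib_create_cm_id(device, sidr_handler, ctx);
 *
 *	if (!IS_ERR(id))
 *		ret = ib_send_cm_sidr_req(id, &param);
 */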
ret = ib_post_send_mad(msg, NULL); if (ret) goto out_free; cm_id->state = IB_CM_SIDR_REQ_SENT; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return 0; out_free: cm_free_priv_msg(msg); out_unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_req); static void cm_format_sidr_req_event(struct cm_work *work, const struct cm_id_private *rx_cm_id, struct ib_cm_id *listen_id) { struct cm_sidr_req_msg *sidr_req_msg; struct ib_cm_sidr_req_event_param *param; sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_req_rcvd; param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg); param->listen_id = listen_id; param->service_id = cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); param->bth_pkey = cm_get_bth_pkey(work); param->port = work->port->port_num; param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg); } static int cm_sidr_req_handler(struct cm_work *work) { struct cm_id_private *cm_id_priv, *listen_cm_id_priv; struct cm_sidr_req_msg *sidr_req_msg; struct ib_wc *wc; int ret; cm_id_priv = cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL); if (IS_ERR(cm_id_priv)) return PTR_ERR(cm_id_priv); /* Record SGID/SLID and request ID for lookup. */ sidr_req_msg = (struct cm_sidr_req_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv->id.remote_id = cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg)); cm_id_priv->id.service_id = cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg)); cm_id_priv->tid = sidr_req_msg->hdr.tid; wc = work->mad_recv_wc->wc; cm_id_priv->sidr_slid = wc->slid; ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); if (ret) goto out; spin_lock_irq(&cm.lock); listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); if (listen_cm_id_priv) { spin_unlock_irq(&cm.lock); atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES] [CM_SIDR_REQ_COUNTER]); goto out; /* Duplicate message. */ } cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD; listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device, cm_id_priv->id.service_id); if (!listen_cm_id_priv) { spin_unlock_irq(&cm.lock); ib_send_cm_sidr_rep(&cm_id_priv->id, &(struct ib_cm_sidr_rep_param){ .status = IB_SIDR_UNSUPPORTED }); goto out; /* No match. */ } spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; cm_id_priv->id.context = listen_cm_id_priv->id.context; /* * A SIDR ID does not need to be in the xarray since it does not receive * mads, is not placed in the remote_id or remote_qpn rbtree, and does * not enter timewait. */ cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id); ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_free_work(work); /* * A pointer to the listen_cm_id is held in the event, so this deref * must be after the event is delivered above. 
*/ cm_deref_id(listen_cm_id_priv); if (ret) cm_destroy_id(&cm_id_priv->id, ret); return 0; out: ib_destroy_cm_id(&cm_id_priv->id); return -EINVAL; } static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg, struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID, cm_id_priv->tid, param->ece.attr_mod); IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg, be32_to_cpu(cm_id_priv->id.remote_id)); IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status); IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num); IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg, be64_to_cpu(cm_id_priv->id.service_id)); IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey); IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg, param->ece.vendor_id & 0xFF); IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg, (param->ece.vendor_id >> 8) & 0xFF); if (param->info && param->info_length) IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg, param->info, param->info_length); if (param->private_data && param->private_data_len) IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg, param->private_data, param->private_data_len); } static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv, struct ib_cm_sidr_rep_param *param) { struct ib_mad_send_buf *msg; unsigned long flags; int ret; lockdep_assert_held(&cm_id_priv->lock); if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) || (param->private_data && param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE)) return -EINVAL; if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD) return -EINVAL; msg = cm_alloc_msg(cm_id_priv); if (IS_ERR(msg)) return PTR_ERR(msg); cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, param); trace_icm_send_sidr_rep(&cm_id_priv->id); ret = ib_post_send_mad(msg, NULL); if (ret) { cm_free_msg(msg); return ret; } cm_id_priv->id.state = IB_CM_IDLE; spin_lock_irqsave(&cm.lock, flags); if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); } spin_unlock_irqrestore(&cm.lock, flags); return 0; } int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, struct ib_cm_sidr_rep_param *param) { struct cm_id_private *cm_id_priv = container_of(cm_id, struct cm_id_private, id); unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); ret = cm_send_sidr_rep_locked(cm_id_priv, param); spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } EXPORT_SYMBOL(ib_send_cm_sidr_rep); static void cm_format_sidr_rep_event(struct cm_work *work, const struct cm_id_private *cm_id_priv) { struct cm_sidr_rep_msg *sidr_rep_msg; struct ib_cm_sidr_rep_event_param *param; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; param = &work->cm_event.param.sidr_rep_rcvd; param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg); param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg); param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg); param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg); param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH, sidr_rep_msg); param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; work->cm_event.private_data = IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg); } static int cm_sidr_rep_handler(struct cm_work *work) { struct cm_sidr_rep_msg *sidr_rep_msg; struct cm_id_private *cm_id_priv; sidr_rep_msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_id_priv = 
cm_acquire_id( cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0); if (!cm_id_priv) return -EINVAL; /* Unmatched reply. */ spin_lock_irq(&cm_id_priv->lock); if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) { spin_unlock_irq(&cm_id_priv->lock); goto out; } cm_id_priv->id.state = IB_CM_IDLE; ib_cancel_mad(cm_id_priv->msg); spin_unlock_irq(&cm_id_priv->lock); cm_format_sidr_rep_event(work, cm_id_priv); cm_process_work(cm_id_priv, work); return 0; out: cm_deref_id(cm_id_priv); return -EINVAL; } static void cm_process_send_error(struct cm_id_private *cm_id_priv, struct ib_mad_send_buf *msg, enum ib_wc_status wc_status) { enum ib_cm_state state = (unsigned long) msg->context[1]; struct ib_cm_event cm_event = {}; int ret; /* Discard old sends. */ spin_lock_irq(&cm_id_priv->lock); if (msg != cm_id_priv->msg) { spin_unlock_irq(&cm_id_priv->lock); cm_free_priv_msg(msg); return; } cm_free_priv_msg(msg); if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS || wc_status == IB_WC_WR_FLUSH_ERR) goto out_unlock; trace_icm_mad_send_err(state, wc_status); switch (state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REQ_ERROR; break; case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_reset_to_idle(cm_id_priv); cm_event.event = IB_CM_REP_ERROR; break; case IB_CM_DREQ_SENT: cm_enter_timewait(cm_id_priv); cm_event.event = IB_CM_DREQ_ERROR; break; case IB_CM_SIDR_REQ_SENT: cm_id_priv->id.state = IB_CM_IDLE; cm_event.event = IB_CM_SIDR_REQ_ERROR; break; default: goto out_unlock; } spin_unlock_irq(&cm_id_priv->lock); cm_event.param.send_status = wc_status; /* No other events can occur on the cm_id at this point. */ ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event); if (ret) ib_destroy_cm_id(&cm_id_priv->id); return; out_unlock: spin_unlock_irq(&cm_id_priv->lock); } static void cm_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { struct ib_mad_send_buf *msg = mad_send_wc->send_buf; struct cm_id_private *cm_id_priv; struct cm_port *port; u16 attr_index; port = mad_agent->context; attr_index = be16_to_cpu(((struct ib_mad_hdr *) msg->mad)->attr_id) - CM_ATTR_ID_OFFSET; if (msg->context[0] == CM_DIRECT_RETRY_CTX) { msg->retries = 1; cm_id_priv = NULL; } else { cm_id_priv = msg->context[0]; } atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]); if (msg->retries) atomic_long_add(msg->retries, &port->counters[CM_XMIT_RETRIES][attr_index]); if (cm_id_priv) cm_process_send_error(cm_id_priv, msg, mad_send_wc->status); else cm_free_msg(msg); } static void cm_work_handler(struct work_struct *_work) { struct cm_work *work = container_of(_work, struct cm_work, work.work); int ret; switch (work->cm_event.event) { case IB_CM_REQ_RECEIVED: ret = cm_req_handler(work); break; case IB_CM_MRA_RECEIVED: ret = cm_mra_handler(work); break; case IB_CM_REJ_RECEIVED: ret = cm_rej_handler(work); break; case IB_CM_REP_RECEIVED: ret = cm_rep_handler(work); break; case IB_CM_RTU_RECEIVED: ret = cm_rtu_handler(work); break; case IB_CM_USER_ESTABLISHED: ret = cm_establish_handler(work); break; case IB_CM_DREQ_RECEIVED: ret = cm_dreq_handler(work); break; case IB_CM_DREP_RECEIVED: ret = cm_drep_handler(work); break; case IB_CM_SIDR_REQ_RECEIVED: ret = cm_sidr_req_handler(work); break; case IB_CM_SIDR_REP_RECEIVED: ret = cm_sidr_rep_handler(work); break; case IB_CM_LAP_RECEIVED: ret = cm_lap_handler(work); break; case IB_CM_APR_RECEIVED: ret = cm_apr_handler(work); break; case IB_CM_TIMEWAIT_EXIT: ret = 
cm_timewait_handler(work); break; default: trace_icm_handler_err(work->cm_event.event); ret = -EINVAL; break; } if (ret) cm_free_work(work); } static int cm_establish(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; struct cm_work *work; unsigned long flags; int ret = 0; struct cm_device *cm_dev; cm_dev = ib_get_client_data(cm_id->device, &cm_client); if (!cm_dev) return -ENODEV; work = kmalloc(sizeof *work, GFP_ATOMIC); if (!work) return -ENOMEM; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id->state) { case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: cm_id->state = IB_CM_ESTABLISHED; break; case IB_CM_ESTABLISHED: ret = -EISCONN; break; default: trace_icm_establish_err(cm_id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); if (ret) { kfree(work); goto out; } /* * The CM worker thread may try to destroy the cm_id before it * can execute this work item. To prevent potential deadlock, * we need to find the cm_id once we're in the context of the * worker thread, rather than holding a reference on it. */ INIT_DELAYED_WORK(&work->work, cm_work_handler); work->local_id = cm_id->local_id; work->remote_id = cm_id->remote_id; work->mad_recv_wc = NULL; work->cm_event.event = IB_CM_USER_ESTABLISHED; /* Check if the device started its remove_one */ spin_lock_irqsave(&cm.lock, flags); if (!cm_dev->going_down) { queue_delayed_work(cm.wq, &work->work, 0); } else { kfree(work); ret = -ENODEV; } spin_unlock_irqrestore(&cm.lock, flags); out: return ret; } static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; unsigned long flags; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); spin_lock_irqsave(&cm_id_priv->lock, flags); if (cm_id->state == IB_CM_ESTABLISHED && (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; cm_id_priv->av = cm_id_priv->alt_av; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event) { int ret; switch (event) { case IB_EVENT_COMM_EST: ret = cm_establish(cm_id); break; case IB_EVENT_PATH_MIG: ret = cm_migrate(cm_id); break; default: ret = -EINVAL; } return ret; } EXPORT_SYMBOL(ib_cm_notify); static void cm_recv_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf, struct ib_mad_recv_wc *mad_recv_wc) { struct cm_port *port = mad_agent->context; struct cm_work *work; enum ib_cm_event_type event; bool alt_path = false; u16 attr_id; int paths = 0; int going_down = 0; switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) { case CM_REQ_ATTR_ID: alt_path = cm_req_has_alt_path((struct cm_req_msg *) mad_recv_wc->recv_buf.mad); paths = 1 + (alt_path != 0); event = IB_CM_REQ_RECEIVED; break; case CM_MRA_ATTR_ID: event = IB_CM_MRA_RECEIVED; break; case CM_REJ_ATTR_ID: event = IB_CM_REJ_RECEIVED; break; case CM_REP_ATTR_ID: event = IB_CM_REP_RECEIVED; break; case CM_RTU_ATTR_ID: event = IB_CM_RTU_RECEIVED; break; case CM_DREQ_ATTR_ID: event = IB_CM_DREQ_RECEIVED; break; case CM_DREP_ATTR_ID: event = IB_CM_DREP_RECEIVED; break; case CM_SIDR_REQ_ATTR_ID: event = IB_CM_SIDR_REQ_RECEIVED; break; case CM_SIDR_REP_ATTR_ID: event = IB_CM_SIDR_REP_RECEIVED; break; case CM_LAP_ATTR_ID: paths = 1; event = IB_CM_LAP_RECEIVED; break; case CM_APR_ATTR_ID: event = IB_CM_APR_RECEIVED; break; default: ib_free_recv_mad(mad_recv_wc); return; } attr_id = 
be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]); work = kmalloc(struct_size(work, path, paths), GFP_KERNEL); if (!work) { ib_free_recv_mad(mad_recv_wc); return; } INIT_DELAYED_WORK(&work->work, cm_work_handler); work->cm_event.event = event; work->mad_recv_wc = mad_recv_wc; work->port = port; /* Check if the device started its remove_one */ spin_lock_irq(&cm.lock); if (!port->cm_dev->going_down) queue_delayed_work(cm.wq, &work->work, 0); else going_down = 1; spin_unlock_irq(&cm.lock); if (going_down) { kfree(work); ib_free_recv_mad(mad_recv_wc); } } static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; if (cm_id_priv->responder_resources) { struct ib_device *ib_dev = cm_id_priv->id.device; u64 support_flush = ib_dev->attrs.device_cap_flags & (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT); u32 flushable = support_flush ? (IB_ACCESS_FLUSH_GLOBAL | IB_ACCESS_FLUSH_PERSISTENT) : 0; qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_ATOMIC | flushable; } qp_attr->pkey_index = cm_id_priv->av.pkey_index; if (cm_id_priv->av.port) qp_attr->port_num = cm_id_priv->av.port->port_num; ret = 0; break; default: trace_icm_qp_init_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN; qp_attr->ah_attr = cm_id_priv->av.ah_attr; if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) && cm_id_priv->av.dlid_datapath && (cm_id_priv->av.dlid_datapath != 0xffff)) qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath; qp_attr->path_mtu = cm_id_priv->path_mtu; qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn); qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn); if (cm_id_priv->qp_type == IB_QPT_RC || cm_id_priv->qp_type == IB_QPT_XRC_TGT) { *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER; qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources; qp_attr->min_rnr_timer = 0; } if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) && cm_id_priv->alt_av.port) { *qp_attr_mask |= IB_QP_ALT_PATH; qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; } ret = 0; break; default: trace_icm_qp_rtr_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { 
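/*
 * Editor's illustration (hypothetical qp/cm_id): ULPs drive their QP through
 * INIT -> RTR -> RTS by asking the CM for the attributes appropriate to the
 * current connection state and applying them with ib_modify_qp(). Only
 * qp_attr->qp_state needs to be set before each call:
 *
 *	struct ib_qp_attr attr = {};
 *	int mask;
 *
 *	attr.qp_state = IB_QPS_INIT;
 *	if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *		ret = ib_modify_qp(qp, &attr, mask);
 *	attr.qp_state = IB_QPS_RTR;
 *	if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *		ret = ib_modify_qp(qp, &attr, mask);
 *	attr.qp_state = IB_QPS_RTS;
 *	if (!ib_cm_init_qp_attr(cm_id, &attr, &mask))
 *		ret = ib_modify_qp(qp, &attr, mask);
 */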
unsigned long flags; int ret; spin_lock_irqsave(&cm_id_priv->lock, flags); switch (cm_id_priv->id.state) { /* Allow transition to RTS before sending REP */ case IB_CM_REQ_RCVD: case IB_CM_MRA_REQ_SENT: case IB_CM_REP_RCVD: case IB_CM_MRA_REP_SENT: case IB_CM_REP_SENT: case IB_CM_MRA_REP_RCVD: case IB_CM_ESTABLISHED: if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) { *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN; qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn); switch (cm_id_priv->qp_type) { case IB_QPT_RC: case IB_QPT_XRC_INI: *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC; qp_attr->retry_cnt = cm_id_priv->retry_count; qp_attr->rnr_retry = cm_id_priv->rnr_retry_count; qp_attr->max_rd_atomic = cm_id_priv->initiator_depth; fallthrough; case IB_QPT_XRC_TGT: *qp_attr_mask |= IB_QP_TIMEOUT; qp_attr->timeout = cm_id_priv->av.timeout; break; default: break; } if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) { *qp_attr_mask |= IB_QP_PATH_MIG_STATE; qp_attr->path_mig_state = IB_MIG_REARM; } } else { *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE; if (cm_id_priv->alt_av.port) qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num; qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index; qp_attr->alt_timeout = cm_id_priv->alt_av.timeout; qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr; qp_attr->path_mig_state = IB_MIG_REARM; } ret = 0; break; default: trace_icm_qp_rts_err(&cm_id_priv->id); ret = -EINVAL; break; } spin_unlock_irqrestore(&cm_id_priv->lock, flags); return ret; } int ib_cm_init_qp_attr(struct ib_cm_id *cm_id, struct ib_qp_attr *qp_attr, int *qp_attr_mask) { struct cm_id_private *cm_id_priv; int ret; cm_id_priv = container_of(cm_id, struct cm_id_private, id); switch (qp_attr->qp_state) { case IB_QPS_INIT: ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTR: ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask); break; case IB_QPS_RTS: ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask); break; default: ret = -EINVAL; break; } return ret; } EXPORT_SYMBOL(ib_cm_init_qp_attr); static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num, struct ib_port_attribute *attr, char *buf) { struct cm_counter_attribute *cm_attr = container_of(attr, struct cm_counter_attribute, attr); struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client); if (WARN_ON(!cm_dev)) return -EINVAL; return sysfs_emit( buf, "%ld\n", atomic_long_read( &cm_dev->port[port_num - 1] ->counters[cm_attr->group][cm_attr->index])); } #define CM_COUNTER_ATTR(_name, _group, _index) \ { \ .attr = __ATTR(_name, 0444, cm_show_counter, NULL), \ .group = _group, .index = _index \ } #define CM_COUNTER_GROUP(_group, _name) \ static struct cm_counter_attribute cm_counter_attr_##_group[] = { \ CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER), \ CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER), \ CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER), \ CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER), \ CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER), \ CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER), \ CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER), \ CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER), \ CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER), \ CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER), \ CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER), \ }; \ static struct attribute *cm_counter_attrs_##_group[] = { \ &cm_counter_attr_##_group[0].attr.attr, \ &cm_counter_attr_##_group[1].attr.attr, \ &cm_counter_attr_##_group[2].attr.attr, \ 
&cm_counter_attr_##_group[3].attr.attr, \ &cm_counter_attr_##_group[4].attr.attr, \ &cm_counter_attr_##_group[5].attr.attr, \ &cm_counter_attr_##_group[6].attr.attr, \ &cm_counter_attr_##_group[7].attr.attr, \ &cm_counter_attr_##_group[8].attr.attr, \ &cm_counter_attr_##_group[9].attr.attr, \ &cm_counter_attr_##_group[10].attr.attr, \ NULL, \ }; \ static const struct attribute_group cm_counter_group_##_group = { \ .name = _name, \ .attrs = cm_counter_attrs_##_group, \ }; CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs") CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries") CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs") CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates") static const struct attribute_group *cm_counter_groups[] = { &cm_counter_group_CM_XMIT, &cm_counter_group_CM_XMIT_RETRIES, &cm_counter_group_CM_RECV, &cm_counter_group_CM_RECV_DUPLICATES, NULL, }; static int cm_add_one(struct ib_device *ib_device) { struct cm_device *cm_dev; struct cm_port *port; struct ib_mad_reg_req reg_req = { .mgmt_class = IB_MGMT_CLASS_CM, .mgmt_class_version = IB_CM_CLASS_VERSION, }; struct ib_port_modify port_modify = { .set_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; int ret; int count = 0; u32 i; cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt), GFP_KERNEL); if (!cm_dev) return -ENOMEM; kref_init(&cm_dev->kref); spin_lock_init(&cm_dev->mad_agent_lock); cm_dev->ib_device = ib_device; cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay; cm_dev->going_down = 0; ib_set_client_data(ib_device, &cm_client, cm_dev); set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); rdma_for_each_port (ib_device, i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = kzalloc(sizeof *port, GFP_KERNEL); if (!port) { ret = -ENOMEM; goto error1; } cm_dev->port[i-1] = port; port->cm_dev = cm_dev; port->port_num = i; ret = ib_port_register_client_groups(ib_device, i, cm_counter_groups); if (ret) goto error1; port->mad_agent = ib_register_mad_agent(ib_device, i, IB_QPT_GSI, &reg_req, 0, cm_send_handler, cm_recv_handler, port, 0); if (IS_ERR(port->mad_agent)) { ret = PTR_ERR(port->mad_agent); goto error2; } ret = ib_modify_port(ib_device, i, 0, &port_modify); if (ret) goto error3; count++; } if (!count) { ret = -EOPNOTSUPP; goto free; } write_lock_irqsave(&cm.device_lock, flags); list_add_tail(&cm_dev->list, &cm.device_list); write_unlock_irqrestore(&cm.device_lock, flags); return 0; error3: ib_unregister_mad_agent(port->mad_agent); error2: ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); ib_unregister_mad_agent(port->mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); } free: cm_device_put(cm_dev); return ret; } static void cm_remove_one(struct ib_device *ib_device, void *client_data) { struct cm_device *cm_dev = client_data; struct cm_port *port; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; unsigned long flags; u32 i; write_lock_irqsave(&cm.device_lock, flags); list_del(&cm_dev->list); write_unlock_irqrestore(&cm.device_lock, flags); spin_lock_irq(&cm.lock); cm_dev->going_down = 1; spin_unlock_irq(&cm.lock); rdma_for_each_port (ib_device, i) { struct ib_mad_agent *mad_agent; if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; mad_agent = port->mad_agent; ib_modify_port(ib_device, 
port->port_num, 0, &port_modify); /* * We flush the workqueue here after going_down has been set; this * verifies that no new work will be queued in the recv handler, * after which we can safely call ib_unregister_mad_agent(). */ flush_workqueue(cm.wq); /* * The above ensures no call paths from the work are still running; * the remaining paths all take the mad_agent_lock. */ spin_lock(&cm_dev->mad_agent_lock); port->mad_agent = NULL; spin_unlock(&cm_dev->mad_agent_lock); ib_unregister_mad_agent(mad_agent); ib_port_unregister_client_groups(ib_device, i, cm_counter_groups); } cm_device_put(cm_dev); } static int __init ib_cm_init(void) { int ret; INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC); get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); INIT_LIST_HEAD(&cm.timewait_list); cm.wq = alloc_workqueue("ib_cm", 0, 1); if (!cm.wq) { ret = -ENOMEM; goto error2; } ret = ib_register_client(&cm_client); if (ret) goto error3; return 0; error3: destroy_workqueue(cm.wq); error2: return ret; } static void __exit ib_cm_cleanup(void) { struct cm_timewait_info *timewait_info, *tmp; spin_lock_irq(&cm.lock); list_for_each_entry(timewait_info, &cm.timewait_list, list) cancel_delayed_work(&timewait_info->work.work); spin_unlock_irq(&cm.lock); ib_unregister_client(&cm_client); destroy_workqueue(cm.wq); list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { list_del(&timewait_info->list); kfree(timewait_info); } WARN_ON(!xa_empty(&cm.local_id_table)); } module_init(ib_cm_init); module_exit(ib_cm_cleanup);
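/*
 * Editor's illustration (hypothetical handler): ib_cm_notify() above lets a
 * ULP forward asynchronous QP events into the CM state machine, e.g. when
 * data arrives before the RTU (communication established) or after path
 * migration completes:
 *
 *	static void ulp_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		switch (event->event) {
 *		case IB_EVENT_COMM_EST:
 *		case IB_EVENT_PATH_MIG:
 *			ib_cm_notify(cm_id, event->event);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */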
/* * linux/fs/nls/mac-cyrillic.c * * Charset maccyrillic translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ /* * COPYRIGHT AND PERMISSION NOTICE * * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under * the Terms of Use in http://www.unicode.org/copyright.html. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of the Unicode data files and any associated documentation (the "Data * Files") or Unicode software and any associated documentation (the * "Software") to deal in the Data Files or Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Data Files or Software, and * to permit persons to whom the Data Files or Software are furnished to do * so, provided that (a) the above copyright notice(s) and this permission * notice appear with all copies of the Data Files or Software, (b) both the * above copyright notice(s) and this permission notice appear in associated * documentation, and (c) there is clear notice in each modified Data File or * in the Software as well as in the documentation associated with the Data * File(s) or Software that the data or software has been modified. * * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF * THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THE DATA FILES OR SOFTWARE. * * Except as contained in this notice, the name of a copyright holder shall * not be used in advertising or otherwise to promote the sale, use or other * dealings in these Data Files or Software without prior written * authorization of the copyright holder. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80 */ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0x90 */ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xa0 */ 0x2020, 0x00b0, 0x0490, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x0406, 0x00ae, 0x00a9, 0x2122, 0x0402, 0x0452, 0x2260, 0x0403, 0x0453, /* 0xb0 */ 0x221e, 0x00b1, 0x2264, 0x2265, 0x0456, 0x00b5, 0x0491, 0x0408, 0x0404, 0x0454, 0x0407, 0x0457, 0x0409, 0x0459, 0x040a, 0x045a, /* 0xc0 */ 0x0458, 0x0405, 0x00ac, 0x221a, 0x0192, 0x2248, 0x2206, 0x00ab, 0x00bb, 0x2026, 0x00a0, 0x040b, 0x045b, 0x040c, 0x045c, 0x0455, /* 0xd0 */ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x201e, 0x040e, 0x045e, 0x040f, 0x045f, 0x2116, 0x0401, 0x0451, 0x044f, /* 0xe0 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xf0 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x20ac, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 
0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */ 0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */ 0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page04[256] = { 0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */ 0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */ 0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */ 0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, page22, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
};

static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

static struct nls_table table = {
	.charset	= "maccyrillic",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
};

static int __init init_nls_maccyrillic(void)
{
	return register_nls(&table);
}

static void __exit exit_nls_maccyrillic(void)
{
	unregister_nls(&table);
}

module_init(init_nls_maccyrillic)
module_exit(exit_nls_maccyrillic)

MODULE_DESCRIPTION("NLS Codepage maccyrillic");
MODULE_LICENSE("Dual BSD/GPL");
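The uni2char()/char2uni() pair above is a two-level table lookup: the high byte of the 16-bit code point selects a page in page_uni2charset[], the low byte indexes into the selected 256-entry page, and a zero entry means "no mapping". Below is a minimal user-space sketch of the same scheme; the demo_* names and most table contents are illustrative stand-ins, not the real MacCyrillic data or kernel API.

#include <stdio.h>

/* Sparse second-level page for U+04xx; only two sample entries filled in.
 * (These two match the real page04 above: page04[0x10] == 0x80, page04[0x30] == 0xe0.) */
static const unsigned char demo_page04[256] = {
	[0x10] = 0x80,	/* U+0410 CYRILLIC CAPITAL LETTER A */
	[0x30] = 0xe0,	/* U+0430 CYRILLIC SMALL LETTER A */
};

/* First level: one slot per high byte; NULL means the whole page is unmapped. */
static const unsigned char *const demo_pages[256] = {
	[0x04] = demo_page04,
};

/* Return the single-byte encoding, or -1 if the code point has no mapping
 * (a zero table entry doubles as "unmapped", exactly as in the kernel tables). */
static int demo_uni2char(unsigned int uni)
{
	const unsigned char *page = demo_pages[(uni >> 8) & 0xff];

	if (!page || !page[uni & 0xff])
		return -1;
	return page[uni & 0xff];
}

int main(void)
{
	printf("U+0410 -> 0x%02x\n", demo_uni2char(0x0410));	/* 0x80 */
	printf("U+0500 -> %d\n", demo_uni2char(0x0500));	/* -1, unmapped */
	return 0;
}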
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SUBVOLUME_H
#define _BCACHEFS_SUBVOLUME_H

#include "darray.h"
#include "subvolume_types.h"

enum bch_validate_flags;

int bch2_check_subvols(struct bch_fs *);
int bch2_check_subvol_children(struct bch_fs *);

int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c,
			    enum bch_validate_flags);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned,
			   struct bkey_s_c, struct bkey_s,
			   enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_subvolume ((struct bkey_ops) {		\
	.key_validate	= bch2_subvolume_validate,		\
	.val_to_text	= bch2_subvolume_to_text,		\
	.trigger	= bch2_subvolume_trigger,		\
	.min_val_size	= 16,					\
})

int bch2_subvol_has_children(struct btree_trans *, u32);
int bch2_subvolume_get(struct btree_trans *, unsigned, bool, int, struct bch_subvolume *);
int __bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *, bool);
int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);

int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);

static inline struct bkey_s_c
bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end,
					    u32 subvolid, unsigned flags)
{
	u32 snapshot;
	int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
	if (ret)
		return bkey_s_c_err(ret);

	bch2_btree_iter_set_snapshot(iter, snapshot);
	return bch2_btree_iter_peek_upto_type(iter, end, flags);
}

#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter,		\
				_end, _subvolid, _flags, _k, _do)		\
({										\
	struct bkey_s_c _k;							\
	int _ret3 = 0;								\
										\
	do {									\
		_ret3 = lockrestart_do(_trans, ({				\
			(_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter),	\
						_end, _subvolid, (_flags));	\
			if (!(_k).k)						\
				break;						\
										\
			bkey_err(_k) ?: (_do);					\
		}));								\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));			\
										\
	bch2_trans_iter_exit((_trans), &(_iter));				\
	_ret3;									\
})

#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id,		\
				_start, _end, _subvolid, _flags, _k, _do)	\
({										\
	struct btree_iter _iter;						\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),			\
			     (_start), (_flags));				\
										\
	for_each_btree_key_in_subvolume_upto_continue(_trans, _iter,		\
					_end, _subvolid, _flags, _k, _do);	\
})

int bch2_delete_dead_snapshots(struct bch_fs *);
void bch2_delete_dead_snapshots_async(struct bch_fs *);

int bch2_subvolume_unlink(struct btree_trans *, u32);
int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool);

int bch2_initialize_subvolumes(struct bch_fs *);
int bch2_fs_upgrade_for_subvolumes(struct bch_fs *);

int bch2_fs_subvolumes_init(struct bch_fs *);

#endif /* _BCACHEFS_SUBVOLUME_H */
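The two for_each_btree_key_in_subvolume_upto*() macros above are built on GNU C statement expressions: the whole macro evaluates to an error code, the caller's loop body is spliced in as _do, and the iterator is torn down on every exit path. Below is a self-contained sketch of that pattern, compilable with GCC or Clang; demo_iter and the plain array standing in for a btree are hypothetical, not bcachefs APIs.

#include <stdio.h>

struct demo_iter {
	const int *pos, *end;
};

/*
 * Statement-expression iterator macro: declares the per-element variable _k,
 * runs _do for each element, stops early on a nonzero "error", and makes the
 * macro itself evaluate to that error (0 on a clean full pass).
 */
#define for_each_demo_key(_iter, _k, _do)			\
({								\
	int _ret = 0;						\
	while (!_ret && (_iter)->pos < (_iter)->end) {		\
		int _k = *(_iter)->pos++;			\
		_ret = (_do);					\
	}							\
	_ret;							\
})

int main(void)
{
	int keys[] = { 1, 2, 3, -1, 4 };
	struct demo_iter iter = { keys, keys + 5 };

	/* The body is itself a statement expression, as in the bcachefs macro:
	 * it yields 0 to keep iterating, or an error code to stop. */
	int ret = for_each_demo_key(&iter, k, ({
		printf("key %d\n", k);
		k < 0 ? -22 /* -EINVAL */ : 0;
	}));

	printf("ret = %d\n", ret);	/* -22; the final key 4 is never visited */
	return 0;
}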
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS("DMA_BUF");

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
 * drivers are all individually exported for drivers which need to overwrite
 * or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.
For * GEM-based drivers, the &dma_buf should be exported using * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release(). * * Thus the chain of references always flows in one direction, avoiding loops: * importing GEM object -> dma-buf -> exported GEM bo. A further complication * are the lookup caches for import and export. These are required to guarantee * that any given object will always have only one unique userspace handle. This * is required to allow userspace to detect duplicated imports, since some GEM * drivers do fail command submissions if a given buffer object is listed more * than once. These import and export caches in &drm_prime_file_private only * retain a weak reference, which is cleaned up when the corresponding object is * released. * * Self-importing: If userspace is using PRIME as a replacement for flink then * it will get a fd->handle request for a GEM object that it created. Drivers * should detect this situation and return back the underlying object from the * dma-buf private. For GEM based drivers this is handled in * drm_gem_prime_import() already. */ struct drm_prime_member { struct dma_buf *dma_buf; uint32_t handle; struct rb_node dmabuf_rb; struct rb_node handle_rb; }; static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; struct rb_node **p, *rb; member = kmalloc(sizeof(*member), GFP_KERNEL); if (!member) return -ENOMEM; get_dma_buf(dma_buf); member->dma_buf = dma_buf; member->handle = handle; rb = NULL; p = &prime_fpriv->dmabufs.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (dma_buf > pos->dma_buf) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->dmabuf_rb, rb, p); rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs); rb = NULL; p = &prime_fpriv->handles.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, handle_rb); if (handle > pos->handle) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->handle_rb, rb, p); rb_insert_color(&member->handle_rb, &prime_fpriv->handles); return 0; } static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) return member->dma_buf; else if (member->handle < handle) rb = rb->rb_right; else rb = rb->rb_left; } return NULL; } static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) { struct rb_node *rb; rb = prime_fpriv->dmabufs.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (member->dma_buf == dma_buf) { *handle = member->handle; return 0; } else if (member->dma_buf < dma_buf) { rb = rb->rb_right; } else { rb = rb->rb_left; } } return -ENOENT; } void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; mutex_lock(&prime_fpriv->lock); rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) { rb_erase(&member->handle_rb, &prime_fpriv->handles); rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); 
dma_buf_put(member->dma_buf); kfree(member); break; } else if (member->handle < handle) { rb = rb->rb_right; } else { rb = rb->rb_left; } } mutex_unlock(&prime_fpriv->lock); } void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) { mutex_init(&prime_fpriv->lock); prime_fpriv->dmabufs = RB_ROOT; prime_fpriv->handles = RB_ROOT; } void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) { /* by now drm_gem_release should've made sure the list is empty */ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); } /** * drm_gem_dmabuf_export - &dma_buf export implementation for GEM * @dev: parent device for the exported dmabuf * @exp_info: the export information used by dma_buf_export() * * This wraps dma_buf_export() for use by generic GEM drivers that are using * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take * a reference to the &drm_device and the exported &drm_gem_object (stored in * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release(). * * Returns the new dmabuf. */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info) { struct drm_gem_object *obj = exp_info->priv; struct dma_buf *dma_buf; dma_buf = dma_buf_export(exp_info); if (IS_ERR(dma_buf)) return dma_buf; drm_dev_get(dev); drm_gem_object_get(obj); dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping; return dma_buf; } EXPORT_SYMBOL(drm_gem_dmabuf_export); /** * drm_gem_dmabuf_release - &dma_buf release implementation for GEM * @dma_buf: buffer to be released * * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers * must use this in their &dma_buf_ops structure as the release callback. * drm_gem_dmabuf_release() should be used in conjunction with * drm_gem_dmabuf_export(). */ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; /* drop the reference on the export fd holds */ drm_gem_object_put(obj); drm_dev_put(dev); } EXPORT_SYMBOL(drm_gem_dmabuf_release); /** * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure * @prime_fd: fd id of the dma-buf which should be imported * @handle: pointer to storage for the handle of the imported buffer object * * This is the PRIME import function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual importing of GEM object from the dma-buf is done through the * &drm_driver.gem_prime_import driver callback. * * Returns 0 on success or a negative error code on failure. */ int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; int ret; dma_buf = dma_buf_get(prime_fd); if (IS_ERR(dma_buf)) return PTR_ERR(dma_buf); mutex_lock(&file_priv->prime.lock); ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); if (ret == 0) goto out_put; /* never seen this one, need to import */ mutex_lock(&dev->object_name_lock); if (dev->driver->gem_prime_import) obj = dev->driver->gem_prime_import(dev, dma_buf); else obj = drm_gem_prime_import(dev, dma_buf); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } if (obj->dma_buf) { WARN_ON(obj->dma_buf != dma_buf); } else { obj->dma_buf = dma_buf; get_dma_buf(dma_buf); } /* _handle_create_tail unconditionally unlocks dev->object_name_lock. 
*/ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_put(obj); if (ret) goto out_put; ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; dma_buf_put(dma_buf); return 0; fail: /* hmm, if driver attached, we are relying on the free-object path * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); dma_buf_put(dma_buf); return ret; out_unlock: mutex_unlock(&dev->object_name_lock); out_put: mutex_unlock(&file_priv->prime.lock); dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; if (dev->driver->prime_fd_to_handle) { return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } static struct dma_buf *export_and_register_object(struct drm_device *dev, struct drm_gem_object *obj, uint32_t flags) { struct dma_buf *dmabuf; /* prevent races with concurrent gem_close. */ if (obj->handle_count == 0) { dmabuf = ERR_PTR(-ENOENT); return dmabuf; } if (obj->funcs && obj->funcs->export) dmabuf = obj->funcs->export(obj, flags); else dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ return dmabuf; } /* * Note that callers do not need to clean up the export cache * since the check for obj->handle_count guarantees that someone * will clean it up. */ obj->dma_buf = dmabuf; get_dma_buf(obj->dma_buf); return dmabuf; } /** * drm_gem_prime_handle_to_dmabuf - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. * * Unlike drm_gem_prime_handle_to_fd(), it returns the struct dma_buf it * has created, without attaching it to any file descriptors. The difference * between those two is similar to that between anon_inode_getfile() and * anon_inode_getfd(); insertion into descriptor table is something you * can not revert if any cleanup is needed, so the descriptor-returning * variants should only be used when you are past the last failure exit * and the only thing left is passing the new file descriptor to userland. * When all you need is the object itself or when you need to do something * else that might fail, use that one instead. 
*/ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags) { struct drm_gem_object *obj; int ret = 0; struct dma_buf *dmabuf; mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { dmabuf = ERR_PTR(-ENOENT); goto out_unlock; } dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); if (dmabuf) { get_dma_buf(dmabuf); goto out; } mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; get_dma_buf(dmabuf); goto out_have_obj; } if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; goto out_have_obj; } dmabuf = export_and_register_object(dev, obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ mutex_unlock(&dev->object_name_lock); goto out; } out_have_obj: /* * If we've exported this buffer then cheat and add it to the import list * so we get the correct handle back. We must do this under the * protection of dev->object_name_lock to ensure that a racing gem close * ioctl doesn't miss to remove this buffer handle from the cache. */ ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); mutex_unlock(&dev->object_name_lock); if (ret) { dma_buf_put(dmabuf); dmabuf = ERR_PTR(ret); } out: drm_gem_object_put(obj); out_unlock: mutex_unlock(&file_priv->prime.lock); return dmabuf; } EXPORT_SYMBOL(drm_gem_prime_handle_to_dmabuf); /** * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC * @prime_fd: pointer to storage for the fd id of the create dma-buf * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. */ int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) { struct dma_buf *dmabuf; int fd = get_unused_fd_flags(flags); if (fd < 0) return fd; dmabuf = drm_gem_prime_handle_to_dmabuf(dev, file_priv, handle, flags); if (IS_ERR(dmabuf)) { put_unused_fd(fd); return PTR_ERR(dmabuf); } fd_install(fd, dmabuf->file); *prime_fd = fd; return 0; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; /* check flags are valid */ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; if (dev->driver->prime_handle_to_fd) { return dev->driver->prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } /** * DOC: PRIME Helpers * * Drivers can implement &drm_gem_object_funcs.export and * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper * functions drm_gem_prime_export() and drm_gem_prime_import(). 
These functions * implement dma-buf support in terms of some lower-level helpers, which are * again exported for drivers to use individually: * * Exporting buffers * ~~~~~~~~~~~~~~~~~ * * Optional pinning of buffers is handled at dma-buf attach and detach time in * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is * unimplemented, exports into another device are rejected. * * For kernel-internal access there's drm_gem_dmabuf_vmap() and * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by * drm_gem_dmabuf_mmap(). * * Note that these export helpers can only be used if the underlying backing * storage is fully coherent and either permanently pinned, or it is safe to pin * it indefinitely. * * FIXME: The underlying helper functions are named rather inconsistently. * * Importing buffers * ~~~~~~~~~~~~~~~~~ * * Importing dma-bufs using drm_gem_prime_import() relies on * &drm_driver.gem_prime_import_sg_table. * * Note that similarly to the export helpers this permanently pins the * underlying backing storage. Which is ok for scanout, but is not the best * option for sharing lots of buffers for rendering. */ /** * drm_gem_map_attach - dma_buf attach implementation for GEM * @dma_buf: buffer to attach device to * @attach: buffer attachment data * * Calls &drm_gem_object_funcs.pin for device specific handling. This can be * used as the &dma_buf_ops.attach callback. Must be used together with * drm_gem_map_detach(). * * Returns 0 on success, negative error code on failure. */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; /* * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers * that implement their own ->map_dma_buf() do not. */ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf && !obj->funcs->get_sg_table) return -ENOSYS; return drm_gem_pin(obj); } EXPORT_SYMBOL(drm_gem_map_attach); /** * drm_gem_map_detach - dma_buf detach implementation for GEM * @dma_buf: buffer to detach from * @attach: attachment to be detached * * Calls &drm_gem_object_funcs.pin for device specific handling. Cleans up * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the * &dma_buf_ops.detach callback. */ void drm_gem_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; drm_gem_unpin(obj); } EXPORT_SYMBOL(drm_gem_map_detach); /** * drm_gem_map_dma_buf - map_dma_buf implementation for GEM * @attach: attachment whose scatterlist is to be returned * @dir: direction of DMA transfer * * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together * with drm_gem_unmap_dma_buf(). * * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR * on error. May return -EINTR if it is interrupted by a signal. 
*/ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); if (WARN_ON(!obj->funcs->get_sg_table)) return ERR_PTR(-ENOSYS); sgt = obj->funcs->get_sg_table(obj); if (IS_ERR(sgt)) return sgt; ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); if (ret) { sg_free_table(sgt); kfree(sgt); sgt = ERR_PTR(ret); } return sgt; } EXPORT_SYMBOL(drm_gem_map_dma_buf); /** * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM * @attach: attachment to unmap buffer from * @sgt: scatterlist info of the buffer to unmap * @dir: direction of DMA transfer * * This can be used as the &dma_buf_ops.unmap_dma_buf callback. */ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { if (!sgt) return; dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } EXPORT_SYMBOL(drm_gem_unmap_dma_buf); /** * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM * @dma_buf: buffer to be mapped * @map: the virtual address of the buffer * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. * The kernel virtual address is returned in map. * * Returns 0 on success or a negative errno code otherwise. */ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_vmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); /** * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM * @dma_buf: buffer to be unmapped * @map: the virtual address of the buffer * * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. */ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; drm_gem_vunmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); /** * drm_gem_prime_mmap - PRIME mmap function for GEM drivers * @obj: GEM object * @vma: Virtual address range * * This function sets up a userspace mapping for PRIME exported buffers using * the same codepath that is used for regular GEM buffer mapping on the DRM fd. * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is * called to set up the mapping. 
*/ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct drm_file *priv; struct file *fil; int ret; /* Add the fake offset */ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); if (ret) { drm_gem_object_put(obj); return ret; } vma->vm_private_data = obj; return 0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); fil = kzalloc(sizeof(*fil), GFP_KERNEL); if (!priv || !fil) { ret = -ENOMEM; goto out; } /* Used by drm_gem_mmap() to lookup the GEM object */ priv->minor = obj->dev->primary; fil->private_data = priv; ret = drm_vma_node_allow(&obj->vma_node, priv); if (ret) goto out; ret = obj->dev->driver->fops->mmap(fil, vma); drm_vma_node_revoke(&obj->vma_node, priv); out: kfree(priv); kfree(fil); return ret; } EXPORT_SYMBOL(drm_gem_prime_mmap); /** * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM * @dma_buf: buffer to be mapped * @vma: virtual address range * * Provides memory mapping for the buffer. This can be used as the * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap(). * * Returns 0 on success or a negative error code on failure. */ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_prime_mmap(obj, vma); } EXPORT_SYMBOL(drm_gem_dmabuf_mmap); static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .cache_sgt_mapping = true, .attach = drm_gem_map_attach, .detach = drm_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, }; /** * drm_prime_pages_to_sg - converts a page array into an sg list * @dev: DRM device * @pages: pointer to the array of page pointers to convert * @nr_pages: length of the page vector * * This helper creates an sg table object from a set of pages * the driver is responsible for mapping the pages into the * importers address space for use with dma_buf itself. * * This is useful for implementing &drm_gem_object_funcs.get_sg_table. */ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages) { struct sg_table *sg; size_t max_segment = 0; int err; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!sg) return ERR_PTR(-ENOMEM); if (dev) max_segment = dma_max_mapping_size(dev->dev); if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); sg = ERR_PTR(err); } return sg; } EXPORT_SYMBOL(drm_prime_pages_to_sg); /** * drm_prime_get_contiguous_size - returns the contiguous size of the buffer * @sgt: sg_table describing the buffer to check * * This helper calculates the contiguous size in the DMA address space * of the buffer described by the provided sg_table. * * This is useful for implementing * &drm_gem_object_funcs.gem_prime_import_sg_table. 
*/ unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) { dma_addr_t expected = sg_dma_address(sgt->sgl); struct scatterlist *sg; unsigned long size = 0; int i; for_each_sgtable_dma_sg(sgt, sg, i) { unsigned int len = sg_dma_len(sg); if (!len) break; if (sg_dma_address(sg) != expected) break; expected += len; size += len; } return size; } EXPORT_SYMBOL(drm_prime_get_contiguous_size); /** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers * using the PRIME helpers. It is used as the default in * drm_gem_prime_handle_to_fd(). */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) { struct drm_device *dev = obj->dev; struct dma_buf_export_info exp_info = { .exp_name = KBUILD_MODNAME, /* white lie for debug */ .owner = dev->driver->fops->owner, .ops = &drm_gem_prime_dmabuf_ops, .size = obj->size, .flags = flags, .priv = obj, .resv = obj->resv, }; return drm_gem_dmabuf_export(dev, &exp_info); } EXPORT_SYMBOL(drm_gem_prime_export); /** * drm_gem_prime_import_dev - core implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * @attach_dev: struct device to dma_buf attach * * This is the core of drm_gem_prime_import(). It's designed to be called by * drivers who want to use a different device structure than &drm_device.dev for * attaching via dma_buf. This function calls * &drm_driver.gem_prime_import_sg_table internally. * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. */ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, struct device *attach_dev) { struct dma_buf_attachment *attach; struct sg_table *sgt; struct drm_gem_object *obj; int ret; if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) { obj = dma_buf->priv; if (obj->dev == dev) { /* * Importing dmabuf exported from our own gem increases * refcount on gem itself instead of f_count of dmabuf. */ drm_gem_object_get(obj); return obj; } } if (!dev->driver->gem_prime_import_sg_table) return ERR_PTR(-EINVAL); attach = dma_buf_attach(dma_buf, attach_dev); if (IS_ERR(attach)) return ERR_CAST(attach); get_dma_buf(dma_buf); sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto fail_detach; } obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail_unmap; } obj->import_attach = attach; obj->resv = dma_buf->resv; return obj; fail_unmap: dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); return ERR_PTR(ret); } EXPORT_SYMBOL(drm_gem_prime_import_dev); /** * drm_gem_prime_import - helper library implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * * This is the implementation of the gem_prime_import functions for GEM drivers * using the PRIME helpers. Drivers can use this as their * &drm_driver.gem_prime_import implementation. It is used as the default * implementation in drm_gem_prime_fd_to_handle(). * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. 
*/ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { return drm_gem_prime_import_dev(dev, dma_buf, dev->dev); } EXPORT_SYMBOL(drm_gem_prime_import); /** * drm_prime_sg_to_page_array - convert an sg table into a page array * @sgt: scatter-gather table to convert * @pages: array of page pointers to store the pages in * @max_entries: size of the passed-in array * * Exports an sg table into an array of pages. * * This function is deprecated and strongly discouraged to be used. * The page array is only useful for page faults and those can corrupt fields * in the struct page if they are not handled by the exporting driver. */ int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages, int max_entries) { struct sg_page_iter page_iter; struct page **p = pages; for_each_sgtable_page(sgt, &page_iter, 0) { if (WARN_ON(p - pages >= max_entries)) return -1; *p++ = sg_page_iter_page(&page_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_array); /** * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array * @sgt: scatter-gather table to convert * @addrs: array to store the dma bus address of each page * @max_entries: size of both the passed-in arrays * * Exports an sg table into an array of addresses. * * Drivers should use this in their &drm_driver.gem_prime_import_sg_table * implementation. */ int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, int max_entries) { struct sg_dma_page_iter dma_iter; dma_addr_t *a = addrs; for_each_sgtable_dma_page(sgt, &dma_iter, 0) { if (WARN_ON(a - addrs >= max_entries)) return -1; *a++ = sg_page_iter_dma_address(&dma_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array); /** * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object * @obj: GEM object which was created from a dma-buf * @sg: the sg-table which was pinned at import time * * This is the cleanup functions which GEM drivers need to call when they use * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs. */ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) { struct dma_buf_attachment *attach; struct dma_buf *dma_buf; attach = obj->import_attach; if (sg) dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL); dma_buf = attach->dmabuf; dma_buf_detach(attach->dmabuf, attach); /* remove the reference */ dma_buf_put(dma_buf); } EXPORT_SYMBOL(drm_prime_gem_destroy);
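From user space, the two ioctl entry points above are reached through DRM_IOCTL_PRIME_HANDLE_TO_FD and DRM_IOCTL_PRIME_FD_TO_HANDLE. Below is a hedged sketch of that round trip; the device node path and the GEM handle value are assumptions (a real program would first create a buffer with a driver-specific ioctl), and the header may be found at <drm/drm.h> (installed kernel uapi headers) or <libdrm/drm.h> depending on the system.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct drm_prime_handle args = {
		.handle = 1,		/* hypothetical GEM handle */
		.flags	= DRM_CLOEXEC,
	};

	/* drm_prime_handle_to_fd_ioctl() -> drm_gem_prime_handle_to_fd() */
	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args)) {
		perror("PRIME_HANDLE_TO_FD");
		return 1;
	}
	printf("handle %u exported as dma-buf fd %d\n", args.handle, args.fd);

	/* Importing our own dma-buf again hits the lookup cache documented
	 * above, so the same handle comes back instead of a duplicate. */
	if (ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args)) {
		perror("PRIME_FD_TO_HANDLE");
		return 1;
	}
	printf("fd %d maps back to handle %u\n", args.fd, args.handle);
	return 0;
}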
2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/clnt.c
 *
 *  This file contains the high-level RPC interface.
 *  It is modeled as a finite state machine to support both synchronous
 *  and asynchronous requests.
 *
 *  -	RPC header generation and argument serialization.
 *  -	Credential refresh.
 *  -	TCP connect handling.
 *  -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 *  Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 *  Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "netns.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);

static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);

static int	rpc_encode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_decode_header(struct rpc_task *task,
				  struct xdr_stream *xdr);
static int	rpc_ping(struct rpc_clnt *clnt);
static int	rpc_ping_noreply(struct rpc_clnt *clnt);
static void	rpc_check_timeout(struct rpc_task *task);

static void rpc_register_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_add(&clnt->cl_clients, &sn->all_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void rpc_unregister_client(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	spin_lock(&sn->rpc_client_lock);
	list_del(&clnt->cl_clients);
	spin_unlock(&sn->rpc_client_lock);
}

static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	rpc_remove_client_dir(clnt);
}

static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
{
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		if (pipefs_sb == clnt->pipefs_sb)
			__rpc_clnt_remove_pipedir(clnt);
		rpc_put_sb_net(net);
	}
}

static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb,
				    struct rpc_clnt *clnt)
{
	static uint32_t clntid;
	const char *dir_name = clnt->cl_program->pipe_dir_name;
	char name[15];
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, dir_name);
	if (dir == NULL) {
		pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name);
		return dir;
	}
	for (;;) {
		snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
		name[sizeof(name) - 1] = '\0';
		dentry = rpc_create_client_dir(dir, name, clnt);
		if (!IS_ERR(dentry))
			break;
		if (dentry == ERR_PTR(-EEXIST))
			continue;
		printk(KERN_INFO "RPC: Couldn't create pipefs entry"
				" %s/%s, error %ld\n",
				dir_name, name, PTR_ERR(dentry));
		break;
	}
	dput(dir);
	return dentry;
}

static int
rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
{
	struct dentry *dentry;

	clnt->pipefs_sb = pipefs_sb;

	if (clnt->cl_program->pipe_dir_name != NULL) {
		dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}
	return 0;
}

static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
{
	if (clnt->cl_program->pipe_dir_name == NULL)
		return 1;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
			return 1;
		if (refcount_read(&clnt->cl_count) == 0)
			return 1;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (clnt->cl_pipedir_objects.pdh_dentry == NULL)
			return 1;
		break;
	}
	return 0;
}

static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event,
				   struct super_block *sb)
{
	struct dentry *dentry;

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = rpc_setup_pipedir_sb(sb, clnt);
		if (!dentry)
			return -ENOENT;
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		break;
	case RPC_PIPEFS_UMOUNT:
		__rpc_clnt_remove_pipedir(clnt);
		break;
	default:
		printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event);
		return -ENOTSUPP;
	}
	return 0;
}

static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event,
				struct super_block *sb)
{
	int error = 0;

	for (;; clnt = clnt->cl_parent) {
		if (!rpc_clnt_skip_event(clnt, event))
			error = __rpc_clnt_handle_event(clnt, event, sb);
		if (error || clnt == clnt->cl_parent)
			break;
	}
	return error;
}

static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt,
		struct rpc_xprt *xprt,
		const struct rpc_timeout *timeout)
{
	struct rpc_xprt *old;

	spin_lock(&clnt->cl_lock);
	old = rcu_dereference_protected(clnt->cl_xprt,
			lockdep_is_held(&clnt->cl_lock));

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	clnt->cl_timeout = timeout;
	rcu_assign_pointer(clnt->cl_xprt, xprt);
	spin_unlock(&clnt->cl_lock);

	return old;
}
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	ssize_t copied;

	copied = strscpy(clnt->cl_nodename,
			 nodename, sizeof(clnt->cl_nodename));

	clnt->cl_nodelen = copied < 0 ? sizeof(clnt->cl_nodename) - 1
					: copied;
}

static int rpc_client_register(struct rpc_clnt *clnt,
			       rpc_authflavor_t pseudoflavor,
			       const char *client_name)
{
	struct rpc_auth_create_args auth_args = {
		.pseudoflavor = pseudoflavor,
		.target_name = client_name,
	};
	struct rpc_auth *auth;
	struct net *net = rpc_net_ns(clnt);
	struct super_block *pipefs_sb;
	int err;

	rpc_clnt_debugfs_register(clnt);

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		err = rpc_setup_pipedir(pipefs_sb, clnt);
		if (err)
			goto out;
	}

	rpc_register_client(clnt);
	if (pipefs_sb)
		rpc_put_sb_net(net);

	auth = rpcauth_create(&auth_args, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				pseudoflavor);
		err = PTR_ERR(auth);
		goto err_auth;
	}
	return 0;
err_auth:
	pipefs_sb = rpc_get_sb_net(net);
	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
out:
	if (pipefs_sb)
		rpc_put_sb_net(net);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	return err;
}

static DEFINE_IDA(rpc_clids);

void rpc_cleanup_clids(void)
{
	ida_destroy(&rpc_clids);
}

static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
	int clid;

	clid = ida_alloc(&rpc_clids, GFP_KERNEL);
	if (clid < 0)
		return clid;
	clnt->cl_clid = clid;
	return 0;
}

static void rpc_free_clid(struct rpc_clnt *clnt)
{
	ida_free(&rpc_clids, clnt->cl_clid);
}

static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
		struct rpc_xprt_switch *xps,
		struct rpc_xprt *xprt,
		struct rpc_clnt *parent)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	const struct rpc_timeout *timeout;
	const char *nodename = args->nodename;
	int err;

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;

	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	clnt->cl_parent = parent ? : clnt;
	clnt->cl_xprtsec = args->xprtsec;

	err = rpc_alloc_clid(clnt);
	if (err)
		goto out_no_clid;

	clnt->cl_cred	  = get_cred(args->cred);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_prog     = args->prognumber ? : program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = args->stats ? : program->stats;
	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
	rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program  = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
			sizeof(clnt->cl_timeout_default));
		timeout = &clnt->cl_timeout_default;
	}

	rpc_clnt_set_transport(clnt, xprt, timeout);
	xprt->main = true;
	xprt_iter_init(&clnt->cl_xpi, xps);
	xprt_switch_put(xps);

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);

	refcount_set(&clnt->cl_count, 1);

	if (nodename == NULL)
		nodename = utsname()->nodename;
	/* save the nodename */
	rpc_clnt_set_nodename(clnt, nodename);

	rpc_sysfs_client_setup(clnt, xps, rpc_net_ns(clnt));
	err = rpc_client_register(clnt, args->authflavor, args->client_name);
	if (err)
		goto out_no_path;
	if (parent)
		refcount_inc(&parent->cl_count);

	trace_rpc_clnt_new(clnt, xprt, args);
	return clnt;

out_no_path:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	put_cred(clnt->cl_cred);
	rpc_free_clid(clnt);
out_no_clid:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_switch_put(xps);
	xprt_put(xprt);
	trace_rpc_clnt_new_err(program->name, args->servername, err);
	return ERR_PTR(err);
}

static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	struct rpc_clnt *clnt = NULL;
	struct rpc_xprt_switch *xps;

	if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xps = args->bc_xprt->xpt_bc_xps;
		xprt_switch_get(xps);
	} else {
		xps = xprt_switch_alloc(xprt, GFP_KERNEL);
		if (xps == NULL) {
			xprt_put(xprt);
			return ERR_PTR(-ENOMEM);
		}
		if (xprt->bc_xprt) {
			xprt_switch_get(xps);
			xprt->bc_xprt->xpt_bc_xps = xps;
		}
	}
	clnt = rpc_new_client(args, xps, xprt, NULL);
	if (IS_ERR(clnt))
		return clnt;

	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	} else if (args->flags & RPC_CLNT_CREATE_CONNECTED) {
		int err = rpc_ping_noreply(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & (RPC_CLNT_CREATE_HARDRTRY|RPC_CLNT_CREATE_SOFTERR)) {
		clnt->cl_softrtry = 0;
		if (args->flags & RPC_CLNT_CREATE_SOFTERR)
			clnt->cl_softerr = 1;
	}

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
		clnt->cl_noretranstimeo = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version.  RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
		.xprtsec = args->xprtsec,
		.connect_timeout = args->connect_timeout,
		.reconnect_timeout = args->reconnect_timeout,
	};
	char servername[RPC_MAXNETNAMELEN];
	struct rpc_clnt *clnt;
	int i;

	if (args->bc_xprt) {
		WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
		xprt = args->bc_xprt->xpt_bc_xprt;
		if (xprt) {
			xprt_get(xprt);
			return rpc_create_xprt(args, xprt);
		}
	}

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			if (sun->sun_path[0])
				snprintf(servername, sizeof(servername), "%s",
					 sun->sun_path);
			else
				snprintf(servername, sizeof(servername), "@%s",
					 sun->sun_path+1);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;
	xprt->reuseport = 0;
	if (args->flags & RPC_CLNT_CREATE_REUSEPORT)
		xprt->reuseport = 1;

	clnt = rpc_create_xprt(args, xprt);
	if (IS_ERR(clnt) || args->nconnect <= 1)
		return clnt;

	for (i = 0; i < args->nconnect - 1; i++) {
		if (rpc_clnt_add_xprt(clnt, &xprtargs, NULL, NULL) < 0)
			break;
	}
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
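/*
 * Illustrative sketch (not part of clnt.c): how a kernel consumer might
 * call rpc_create().  The guard macro, the function name, and the
 * rpc_program pointer passed in by the caller are hypothetical; the
 * rpc_create_args fields and flags are the ones used above.
 */
#ifdef RPC_CLNT_EXAMPLES
static struct rpc_clnt *example_create_client(struct net *net,
					      struct sockaddr *sap,
					      size_t salen,
					      const struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= sap,
		.addrsize	= salen,
		.program	= prog,
		.version	= 0,	/* hypothetical: program's first version */
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING, /* skip the ping */
	};

	/* Returns a valid rpc_clnt or an ERR_PTR() on failure. */
	return rpc_create(&args);
}
#endif /* RPC_CLNT_EXAMPLES */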
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt_switch *xps;
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch));
	rcu_read_unlock();
	if (xprt == NULL || xps == NULL) {
		xprt_put(xprt);
		xprt_switch_put(xps);
		goto out_err;
	}
	args->servername = xprt->servername;
	args->nodename = clnt->cl_nodename;

	new = rpc_new_client(args, xps, xprt, clnt);
	if (IS_ERR(new))
		return new;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_softerr = clnt->cl_softerr;
	new->cl_noretranstimeo = clnt->cl_noretranstimeo;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	new->cl_principal = clnt->cl_principal;
	new->cl_max_connect = clnt->cl_max_connect;
	return new;

out_err:
	trace_rpc_clnt_clone_err(clnt, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= clnt->cl_auth->au_flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client);

/**
 * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth
 *
 * @clnt: RPC client whose parameters are copied
 * @flavor: security flavor for new client
 *
 * Returns a fresh RPC client or an ERR_PTR.
 */
struct rpc_clnt *
rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor)
{
	struct rpc_create_args args = {
		.program	= clnt->cl_program,
		.prognumber	= clnt->cl_prog,
		.version	= clnt->cl_vers,
		.authflavor	= flavor,
		.cred		= clnt->cl_cred,
		.stats		= clnt->cl_stats,
	};
	return __rpc_clone_client(&args, clnt);
}
EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth);

/**
 * rpc_switch_client_transport: switch the RPC transport on the fly
 * @clnt: pointer to a struct rpc_clnt
 * @args: pointer to the new transport arguments
 * @timeout: pointer to the new timeout parameters
 *
 * This function allows the caller to switch the RPC transport for the
 * rpc_clnt structure 'clnt' to allow it to connect to a mirrored NFS
 * server, for instance.  It assumes that the caller has ensured that
 * there are no active RPC tasks by using some form of locking.
 *
 * Returns zero if "clnt" is now using the new xprt.  Otherwise a
 * negative errno is returned, and "clnt" continues to use the old
 * xprt.
 */
int rpc_switch_client_transport(struct rpc_clnt *clnt,
		struct xprt_create *args,
		const struct rpc_timeout *timeout)
{
	const struct rpc_timeout *old_timeo;
	rpc_authflavor_t pseudoflavor;
	struct rpc_xprt_switch *xps, *oldxps;
	struct rpc_xprt *xprt, *old;
	struct rpc_clnt *parent;
	int err;

	args->xprtsec = clnt->cl_xprtsec;
	xprt = xprt_create_transport(args);
	if (IS_ERR(xprt))
		return PTR_ERR(xprt);

	xps = xprt_switch_alloc(xprt, GFP_KERNEL);
	if (xps == NULL) {
		xprt_put(xprt);
		return -ENOMEM;
	}

	pseudoflavor = clnt->cl_auth->au_flavor;

	old_timeo = clnt->cl_timeout;
	old = rpc_clnt_set_transport(clnt, xprt, timeout);
	oldxps = xprt_iter_xchg_switch(&clnt->cl_xpi, xps);

	rpc_unregister_client(clnt);
	__rpc_clnt_remove_pipedir(clnt);
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);

	/*
	 * A new transport was created.  "clnt" therefore
	 * becomes the root of a new cl_parent tree.  clnt's
	 * children, if it has any, still point to the old xprt.
	 */
"clnt" therefore * becomes the root of a new cl_parent tree. clnt's * children, if it has any, still point to the old xprt. */ parent = clnt->cl_parent; clnt->cl_parent = clnt; /* * The old rpc_auth cache cannot be re-used. GSS * contexts in particular are between a single * client and server. */ err = rpc_client_register(clnt, pseudoflavor, NULL); if (err) goto out_revert; synchronize_rcu(); if (parent != clnt) rpc_release_client(parent); xprt_switch_put(oldxps); xprt_put(old); trace_rpc_clnt_replace_xprt(clnt); return 0; out_revert: xps = xprt_iter_xchg_switch(&clnt->cl_xpi, oldxps); rpc_clnt_set_transport(clnt, old, old_timeo); clnt->cl_parent = parent; rpc_client_register(clnt, pseudoflavor, NULL); xprt_switch_put(xps); xprt_put(xprt); trace_rpc_clnt_replace_xprt_err(clnt); return err; } EXPORT_SYMBOL_GPL(rpc_switch_client_transport); static struct rpc_xprt_switch *rpc_clnt_xprt_switch_get(struct rpc_clnt *clnt) { struct rpc_xprt_switch *xps; rcu_read_lock(); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); rcu_read_unlock(); return xps; } static int _rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi, void func(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps)) { struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); if (xps == NULL) return -EAGAIN; func(xpi, xps); xprt_switch_put(xps); return 0; } static int rpc_clnt_xprt_iter_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) { return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listall); } static int rpc_clnt_xprt_iter_offline_init(struct rpc_clnt *clnt, struct rpc_xprt_iter *xpi) { return _rpc_clnt_xprt_iter_init(clnt, xpi, xprt_iter_init_listoffline); } /** * rpc_clnt_iterate_for_each_xprt - Apply a function to all transports * @clnt: pointer to client * @fn: function to apply * @data: void pointer to function data * * Iterates through the list of RPC transports currently attached to the * client and applies the function fn(clnt, xprt, data). * * On error, the iteration stops, and the function returns the error value. */ int rpc_clnt_iterate_for_each_xprt(struct rpc_clnt *clnt, int (*fn)(struct rpc_clnt *, struct rpc_xprt *, void *), void *data) { struct rpc_xprt_iter xpi; int ret; ret = rpc_clnt_xprt_iter_init(clnt, &xpi); if (ret) return ret; for (;;) { struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); if (!xprt) break; ret = fn(clnt, xprt, data); xprt_put(xprt); if (ret < 0) break; } xprt_iter_destroy(&xpi); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_iterate_for_each_xprt); /* * Kill all tasks for the given client. * XXX: kill their descendants as well? */ void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; if (list_empty(&clnt->cl_tasks)) return; /* * Spin lock all_tasks to prevent changes... */ trace_rpc_clnt_killall(clnt); spin_lock(&clnt->cl_lock); list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) rpc_signal_task(rovr); spin_unlock(&clnt->cl_lock); } EXPORT_SYMBOL_GPL(rpc_killall_tasks); /** * rpc_cancel_tasks - try to cancel a set of RPC tasks * @clnt: Pointer to RPC client * @error: RPC task error value to set * @fnmatch: Pointer to selector function * @data: User data * * Uses @fnmatch to define a set of RPC tasks that are to be cancelled. * The argument @error must be a negative error value. 
/**
 * rpc_cancel_tasks - try to cancel a set of RPC tasks
 * @clnt: Pointer to RPC client
 * @error: RPC task error value to set
 * @fnmatch: Pointer to selector function
 * @data: User data
 *
 * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
 * The argument @error must be a negative error value.
 */
unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
			       bool (*fnmatch)(const struct rpc_task *,
					       const void *),
			       const void *data)
{
	struct rpc_task *task;
	unsigned long count = 0;

	if (list_empty(&clnt->cl_tasks))
		return 0;
	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&clnt->cl_lock);
	list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
		if (!RPC_IS_ACTIVATED(task))
			continue;
		if (!fnmatch(task, data))
			continue;
		rpc_task_try_cancel(task, error);
		count++;
	}
	spin_unlock(&clnt->cl_lock);
	return count;
}
EXPORT_SYMBOL_GPL(rpc_cancel_tasks);

static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
				    struct rpc_xprt *xprt, void *dummy)
{
	if (xprt_connected(xprt))
		xprt_force_disconnect(xprt);
	return 0;
}

void rpc_clnt_disconnect(struct rpc_clnt *clnt)
{
	rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
}
EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);

/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests.
 */
void rpc_shutdown_client(struct rpc_clnt *clnt)
{
	might_sleep();

	trace_rpc_clnt_shutdown(clnt);

	while (!list_empty(&clnt->cl_tasks)) {
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			list_empty(&clnt->cl_tasks), 1*HZ);
	}

	rpc_release_client(clnt);
}
EXPORT_SYMBOL_GPL(rpc_shutdown_client);

/*
 * Free an RPC client
 */
static void rpc_free_client_work(struct work_struct *work)
{
	struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

	trace_rpc_clnt_free(clnt);

	/* These might block on processes that might allocate memory,
	 * so they cannot be called in rpciod, so they are handled separately
	 * here.
	 */
	rpc_sysfs_client_destroy(clnt);
	rpc_clnt_debugfs_unregister(clnt);
	rpc_free_clid(clnt);
	rpc_clnt_remove_pipedir(clnt);
	xprt_put(rcu_dereference_raw(clnt->cl_xprt));

	kfree(clnt);
	rpciod_down();
}

static struct rpc_clnt *
rpc_free_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *parent = NULL;

	trace_rpc_clnt_release(clnt);
	if (clnt->cl_parent != clnt)
		parent = clnt->cl_parent;
	rpc_unregister_client(clnt);
	rpc_free_iostats(clnt->cl_metrics);
	clnt->cl_metrics = NULL;
	xprt_iter_destroy(&clnt->cl_xpi);
	put_cred(clnt->cl_cred);

	INIT_WORK(&clnt->cl_work, rpc_free_client_work);
	schedule_work(&clnt->cl_work);
	return parent;
}

/*
 * Free an RPC client
 */
static struct rpc_clnt *
rpc_free_auth(struct rpc_clnt *clnt)
{
	/*
	 * Note: RPCSEC_GSS may need to send NULL RPC calls in order to
	 *       release remaining GSS contexts. This mechanism ensures
	 *       that it can do so safely.
	 */
	if (clnt->cl_auth != NULL) {
		rpcauth_release(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (refcount_dec_and_test(&clnt->cl_count))
		return rpc_free_client(clnt);
	return NULL;
}

/*
 * Release reference to the RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	do {
		if (list_empty(&clnt->cl_tasks))
			wake_up(&destroy_wait);
		if (refcount_dec_not_one(&clnt->cl_count))
			break;
		clnt = rpc_free_auth(clnt);
	} while (clnt != NULL);
}
EXPORT_SYMBOL_GPL(rpc_release_client);
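/*
 * Illustrative sketch (not part of clnt.c): a selector for
 * rpc_cancel_tasks() above.  The "cancel by owner" rule is hypothetical;
 * real callers match on whatever task state they care about.  Note that
 * @error must be negative, per the rpc_cancel_tasks() kdoc.
 */
#ifdef RPC_CLNT_EXAMPLES
static bool example_match_owner(const struct rpc_task *task, const void *data)
{
	/* Hypothetical rule: select only tasks batched under this owner. */
	return task->tk_owner == *(const pid_t *)data;
}

static unsigned long example_cancel_owned_tasks(struct rpc_clnt *clnt,
						pid_t owner)
{
	/* Returns the number of tasks that were selected and cancelled. */
	return rpc_cancel_tasks(clnt, -EIO, example_match_owner, &owner);
}
#endif /* RPC_CLNT_EXAMPLES */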
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program.  This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.cred		= old->cl_cred,
		.stats		= old->cl_stats,
		.timeout	= old->cl_timeout,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

struct rpc_xprt *
rpc_task_get_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	if (!xprt)
		return NULL;
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_inc(&xps->xps_queuelen);
	rcu_read_unlock();
	atomic_long_inc(&xprt->queuelen);

	return xprt;
}

static void
rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
{
	struct rpc_xprt_switch *xps;

	atomic_long_dec(&xprt->queuelen);
	rcu_read_lock();
	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
	atomic_long_dec(&xps->xps_queuelen);
	rcu_read_unlock();

	xprt_put(xprt);
}

void rpc_task_release_transport(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (xprt) {
		task->tk_xprt = NULL;
		if (task->tk_client)
			rpc_task_release_xprt(task->tk_client, xprt);
		else
			xprt_put(xprt);
	}
}
EXPORT_SYMBOL_GPL(rpc_task_release_transport);

void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	rpc_task_release_transport(task);
	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);
	}
}

static struct rpc_xprt *
rpc_task_get_first_xprt(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	return rpc_task_get_xprt(clnt, xprt);
}

static struct rpc_xprt *
rpc_task_get_next_xprt(struct rpc_clnt *clnt)
{
	return rpc_task_get_xprt(clnt, xprt_iter_get_next(&clnt->cl_xpi));
}

static
void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (task->tk_xprt) {
		if (!(test_bit(XPRT_OFFLINE, &task->tk_xprt->state) &&
		      (task->tk_flags & RPC_TASK_MOVEABLE)))
			return;
		xprt_release(task);
		xprt_put(task->tk_xprt);
	}
	if (task->tk_flags & RPC_TASK_NO_ROUND_ROBIN)
		task->tk_xprt = rpc_task_get_first_xprt(clnt);
	else
		task->tk_xprt = rpc_task_get_next_xprt(clnt);
}

static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_set_transport(task, clnt);
	task->tk_client = clnt;
	refcount_inc(&clnt->cl_count);
	if (clnt->cl_softrtry)
		task->tk_flags |= RPC_TASK_SOFT;
	if (clnt->cl_softerr)
		task->tk_flags |= RPC_TASK_TIMEOUT;
	if (clnt->cl_noretranstimeo)
		task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
	/* Add to the client's list of all tasks */
	spin_lock(&clnt->cl_lock);
	list_add_tail(&task->tk_task, &clnt->cl_tasks);
	spin_unlock(&clnt->cl_lock);
}

static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		task->tk_msg.rpc_cred = msg->rpc_cred;
		if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
			get_cred(task->tk_msg.rpc_cred);
	}
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		return task;

	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	atomic_inc(&task->tk_count);
	rpc_execute(task);
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void call_bc_encode(struct rpc_task *task);

/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 * @timeout: timeout values to use for this task
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
		struct rpc_timeout *timeout)
{
	struct rpc_task *task;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = &rpc_default_ops,
		.flags = RPC_TASK_SOFTCONN |
			RPC_TASK_NO_RETRANS_TIMEOUT,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		return task;
	}

	xprt_init_bc_request(req, task, timeout);

	task->tk_action = call_bc_encode;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
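/*
 * Illustrative sketch (not part of clnt.c): issuing a blocking call with
 * rpc_call_sync() above.  The wrapper name and the caller-supplied
 * rpc_procinfo/argument/result pointers are hypothetical; real callers
 * take them from their program's procedure table (e.g. NFS).
 */
#ifdef RPC_CLNT_EXAMPLES
static int example_call_sync(struct rpc_clnt *clnt,
			     const struct rpc_procinfo *proc,
			     void *argp, void *resp)
{
	struct rpc_message msg = {
		.rpc_proc = proc,	/* proc number + XDR encode/decode */
		.rpc_argp = argp,	/* serialized by proc->p_encode */
		.rpc_resp = resp,	/* filled in by proc->p_decode */
	};

	/* Blocks until the reply is decoded; returns the task's tk_status. */
	return rpc_call_sync(clnt, &msg, 0);
}
#endif /* RPC_CLNT_EXAMPLES */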
/**
 * rpc_prepare_reply_pages - Prepare to receive a reply data payload into pages
 * @req: RPC request to prepare
 * @pages: vector of struct page pointers
 * @base: offset in first page where receive should start, in bytes
 * @len: expected size of the upper layer data payload, in bytes
 * @hdrsize: expected size of upper layer reply header, in XDR words
 *
 */
void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages,
			     unsigned int base, unsigned int len,
			     unsigned int hdrsize)
{
	hdrsize += RPC_REPHDRSIZE + req->rq_cred->cr_auth->au_ralign;

	xdr_inline_pages(&req->rq_rcv_buf, hdrsize << 2, pages, base, len);
	trace_rpc_xdr_reply_pages(req->rq_task, &req->rq_rcv_buf);
}
EXPORT_SYMBOL_GPL(rpc_prepare_reply_pages);

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
 */
size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize)
{
	size_t bytes;
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);

	bytes = xprt->addrlen;
	if (bytes > bufsize)
		bytes = bufsize;
	memcpy(buf, &xprt->addr, bytes);
	rcu_read_unlock();

	return bytes;
}
EXPORT_SYMBOL_GPL(rpc_peeraddr);

/**
 * rpc_peeraddr2str - return remote peer address in printable format
 * @clnt: RPC client structure
 * @format: address format
 *
 * NB: the lifetime of the memory referenced by the returned pointer is
 * the same as the rpc_xprt itself.  As long as the caller uses this
 * pointer, it must hold the RCU read lock.
 */
const char *rpc_peeraddr2str(struct rpc_clnt *clnt,
			     enum rpc_display_format_t format)
{
	struct rpc_xprt *xprt;

	xprt = rcu_dereference(clnt->cl_xprt);

	if (xprt->address_strings[format] != NULL)
		return xprt->address_strings[format];
	else
		return "unprintable";
}
EXPORT_SYMBOL_GPL(rpc_peeraddr2str);

static const struct sockaddr_in rpc_inaddr_loopback = {
	.sin_family		= AF_INET,
	.sin_addr.s_addr	= htonl(INADDR_ANY),
};

static const struct sockaddr_in6 rpc_in6addr_loopback = {
	.sin6_family		= AF_INET6,
	.sin6_addr		= IN6ADDR_ANY_INIT,
};

/*
 * Try a getsockname() on a connected datagram socket.  Using a
 * connected datagram socket prevents leaving a socket in TIME_WAIT.
 * This conserves the ephemeral port number space.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen,
			struct sockaddr *buf)
{
	struct socket *sock;
	int err;

	err = __sock_create(net, sap->sa_family,
			    SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
	if (err < 0) {
		dprintk("RPC: can't create UDP socket (%d)\n", err);
		goto out;
	}

	switch (sap->sa_family) {
	case AF_INET:
		err = kernel_bind(sock,
				  (struct sockaddr *)&rpc_inaddr_loopback,
				  sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		err = kernel_bind(sock,
				  (struct sockaddr *)&rpc_in6addr_loopback,
				  sizeof(rpc_in6addr_loopback));
		break;
	default:
		err = -EAFNOSUPPORT;
		goto out_release;
	}
	if (err < 0) {
		dprintk("RPC: can't bind UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_connect(sock, sap, salen, 0);
	if (err < 0) {
		dprintk("RPC: can't connect UDP socket (%d)\n", err);
		goto out_release;
	}

	err = kernel_getsockname(sock, buf);
	if (err < 0) {
		dprintk("RPC: getsockname failed (%d)\n", err);
		goto out_release;
	}

	err = 0;
	if (buf->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf;
		sin6->sin6_scope_id = 0;
	}
	dprintk("RPC: %s succeeded\n", __func__);

out_release:
	sock_release(sock);
out:
	return err;
}
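/*
 * Illustrative sketch (not part of clnt.c): printing the peer address.
 * Per the rpc_peeraddr2str() note above, the returned string shares the
 * rpc_xprt's lifetime, so the RCU read lock must be held across the use.
 * The wrapper name is hypothetical; RPC_DISPLAY_ADDR is one of the
 * standard rpc_display_format_t values.
 */
#ifdef RPC_CLNT_EXAMPLES
static void example_log_peer(struct rpc_clnt *clnt)
{
	rcu_read_lock();
	pr_info("peer is %s\n", rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
	rcu_read_unlock();
}
#endif /* RPC_CLNT_EXAMPLES */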
/*
 * Scraping a connected socket failed, so we don't have a usable
 * local address.  Fallback: generate an address that will prevent
 * the server from calling us back.
 *
 * Returns zero and fills in "buf" if successful; otherwise, a
 * negative errno is returned.
 */
static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen)
{
	switch (family) {
	case AF_INET:
		if (buflen < sizeof(rpc_inaddr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_inaddr_loopback,
		       sizeof(rpc_inaddr_loopback));
		break;
	case AF_INET6:
		if (buflen < sizeof(rpc_in6addr_loopback))
			return -EINVAL;
		memcpy(buf, &rpc_in6addr_loopback,
		       sizeof(rpc_in6addr_loopback));
		break;
	default:
		dprintk("RPC: %s: address family not supported\n",
			__func__);
		return -EAFNOSUPPORT;
	}
	dprintk("RPC: %s: succeeded\n", __func__);
	return 0;
}

/**
 * rpc_localaddr - discover local endpoint address for an RPC client
 * @clnt: RPC client structure
 * @buf: target buffer
 * @buflen: size of target buffer, in bytes
 *
 * Returns zero and fills in "buf" and "buflen" if successful;
 * otherwise, a negative errno is returned.
 *
 * This works even if the underlying transport is not currently connected,
 * or if the upper layer never previously provided a source address.
 *
 * The result of this function call is transient: multiple calls in
 * succession may give different results, depending on how local
 * networking configuration changes over time.
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);

void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);
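/*
 * Illustrative sketch (not part of clnt.c): discovering the local
 * endpoint used to reach the server via rpc_localaddr() above.  The
 * wrapper name is hypothetical.  Per the kdoc, the result is transient
 * and may differ between calls.
 */
#ifdef RPC_CLNT_EXAMPLES
static int example_show_localaddr(struct rpc_clnt *clnt)
{
	struct sockaddr_storage ss;
	int err;

	err = rpc_localaddr(clnt, (struct sockaddr *)&ss, sizeof(ss));
	if (err)
		return err;	/* e.g. -EAFNOSUPPORT */
	pr_info("local address family: %d\n", ss.ss_family);
	return 0;
}
#endif /* RPC_CLNT_EXAMPLES */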
/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet.  For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
 * @clnt: RPC client to query
 */
size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	size_t ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_maxpayload(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_bc_payload);

unsigned int rpc_num_bc_slots(struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	unsigned int ret;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	ret = xprt->ops->bc_num_slots(xprt);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_num_bc_slots);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

static int
__rpc_restart_call(struct rpc_task *task, void (*action)(struct rpc_task *))
{
	task->tk_status = 0;
	task->tk_rpc_status = 0;
	task->tk_action = action;
	return 1;
}

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	return __rpc_restart_call(task, call_start);
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (task->tk_ops->rpc_call_prepare != NULL)
		return __rpc_restart_call(task, rpc_prepare_task);
	return rpc_restart_call(task);
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

const char
*rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}

static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
	trace_rpc_call_rpcerror(task, tk_status, rpc_status);
	rpc_task_set_rpc_status(task, rpc_status);
	rpc_exit(task, tk_status);
}

static void
rpc_call_rpcerror(struct rpc_task *task, int status)
{
	__rpc_call_rpcerror(task, status, status);
}

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	int idx = task->tk_msg.rpc_proc->p_statidx;

	trace_rpc_request(task);

	if (task->tk_client->cl_shutdown) {
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	/* Increment call count (version might not be valid for ping) */
	if (clnt->cl_program->version[clnt->cl_vers])
		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
	rpc_task_set_transport(task, clnt);
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}

static void call_retry_reserve(struct rpc_task *task);
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;
			return;
		}
		rpc_call_rpcerror(task, -EIO);
		return;
	}

	switch (status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		fallthrough;
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	default:
		rpc_call_rpcerror(task, status);
	}
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	task->tk_status = 0;
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		fallthrough;
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		fallthrough;
	case -EAGAIN:
		status = -EACCES;
		fallthrough;
	case -EKEYEXPIRED:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		trace_rpc_retry_refresh_status(task);
		return;
	case -ENOMEM:
		rpc_delay(task, HZ >> 4);
		return;
	}
	trace_rpc_refresh_status(task);
	rpc_call_rpcerror(task, status);
}

/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	const struct rpc_auth *auth = task->tk_rqstp->rq_cred->cr_auth;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
	int status;

	task->tk_status = 0;
	task->tk_action = call_encode;

	if (req->rq_buffer)
		return;

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (auth->au_cslack << 1) +
			   proc->p_arglen;
	req->rq_callsize <<= 2;
	/*
	 * Note: the reply buffer must at minimum allocate enough space
	 * for the 'struct accepted_reply' from RFC5531.
	 */
	req->rq_rcvsize = RPC_REPHDRSIZE + auth->au_rslack + \
			max_t(size_t, proc->p_replen, 2);
	req->rq_rcvsize <<= 2;

	status = xprt->ops->buf_alloc(task);
	trace_rpc_buf_alloc(task, status);
	if (status == 0)
		return;
	if (status != -ENOMEM) {
		rpc_call_rpcerror(task, status);
		return;
	}

	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_call_rpcerror(task, -ERESTARTSYS);
}
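/*
 * Worked example for the sizing in call_allocate() above.  The numeric
 * values below are purely illustrative assumptions, not the real
 * constants; everything is in XDR quads (4-byte words) until the final
 * "<<= 2" converts quads to bytes:
 *
 *	assume RPC_CALLHDRSIZE = 6, au_cslack = 5, p_arglen = 10:
 *	rq_callsize = 6 + (5 << 1) + 10 = 26 quads -> 26 << 2 = 104 bytes
 *
 *	assume RPC_REPHDRSIZE = 4, au_rslack = 3, p_replen = 8:
 *	rq_rcvsize = 4 + 3 + max(8, 2) = 15 quads -> 15 << 2 = 60 bytes
 *
 * The max_t(size_t, p_replen, 2) floor is what guarantees room for the
 * RFC 5531 accepted_reply even when a procedure has no reply body.
 */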
static int
rpc_task_need_encode(struct rpc_task *task)
{
	return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
		(!(task->tk_flags & RPC_TASK_SENT) ||
		 !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
		 xprt_request_need_retransmit(task));
}

static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_stream xdr;

	xdr_buf_init(&req->rq_snd_buf,
		     req->rq_buffer,
		     req->rq_callsize);
	xdr_buf_init(&req->rq_rcv_buf,
		     req->rq_rbuffer,
		     req->rq_rcvsize);

	req->rq_reply_bytes_recvd = 0;
	req->rq_snd_buf.head[0].iov_len = 0;
	xdr_init_encode(&xdr, &req->rq_snd_buf,
			req->rq_snd_buf.head[0].iov_base, req);
	if (rpc_encode_header(task, &xdr))
		return;

	task->tk_status = rpcauth_wrap_req(task, &xdr);
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	if (!rpc_task_need_encode(task))
		goto out;

	/* Dequeue task from the receive queue while we're encoding */
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
	/* Add task to reply queue before transmission to avoid races */
	if (task->tk_status == 0 && rpc_reply_expected(task))
		task->tk_status = xprt_request_enqueue_receive(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
		switch (task->tk_status) {
		case -EAGAIN:
		case -ENOMEM:
			rpc_delay(task, HZ >> 4);
			break;
		case -EKEYEXPIRED:
			if (!task->tk_cred_retry) {
				rpc_call_rpcerror(task, task->tk_status);
			} else {
				task->tk_action = call_refresh;
				task->tk_cred_retry--;
				trace_rpc_retry_refresh_status(task);
			}
			break;
		default:
			rpc_call_rpcerror(task, task->tk_status);
		}
		return;
	}

	xprt_request_enqueue_transmit(task);
out:
	task->tk_action = call_transmit;
	/* Check that the connection is OK */
	if (!xprt_bound(task->tk_xprt))
		task->tk_action = call_bind;
	else if (!xprt_connected(task->tk_xprt))
		task->tk_action = call_connect;
}

/*
 * Helpers to check if the task was already transmitted, and
 * to take action when that is the case.
 */
static bool
rpc_task_transmitted(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

static void
rpc_task_handle_transmitted(struct rpc_task *task)
{
	xprt_end_transmit(task);
	task->tk_action = call_transmit_status;
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (xprt_bound(xprt)) {
		task->tk_action = call_connect;
		return;
	}

	task->tk_action = call_bind_status;
	if (!xprt_prepare_transmit(task))
		return;

	xprt->ops->rpcbind(task);
}

/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	int status = -EIO;

	if (rpc_task_transmitted(task)) {
		rpc_task_handle_transmitted(task);
		return;
	}

	if (task->tk_status >= 0)
		goto out_next;
	if (xprt_bound(xprt)) {
		task->tk_status = 0;
		goto out_next;
	}

	switch (task->tk_status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		trace_rpcb_prog_unavail_err(task);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ENOBUFS:
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EAGAIN:
		goto retry_timeout;
	case -ETIMEDOUT:
		trace_rpcb_timeout_err(task);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		trace_rpcb_bind_version_err(task);
		break;
	case -EPROTONOSUPPORT:
		trace_rpcb_bind_version_err(task);
		goto retry_timeout;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -ENETDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		trace_rpcb_unreachable_err(task);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		trace_rpcb_unrecognized_err(task);
	}

	rpc_call_rpcerror(task, status);
	return;
out_next:
	task->tk_action = call_connect;
	return;
retry_timeout:
	task->tk_status = 0;
	task->tk_action = call_bind;
	rpc_check_timeout(task);
}
Connect to the RPC server */ static void call_connect(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } if (xprt_connected(xprt)) { task->tk_action = call_transmit; return; } task->tk_action = call_connect_status; if (task->tk_status < 0) return; if (task->tk_flags & RPC_TASK_NOCONNECT) { rpc_call_rpcerror(task, -ENOTCONN); return; } if (!xprt_prepare_transmit(task)) return; xprt_connect(task); } /* * 4c. Sort out connect result */ static void call_connect_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt; struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } trace_rpc_connect_status(task); if (task->tk_status == 0) { clnt->cl_stats->netreconn++; goto out_next; } if (xprt_connected(xprt)) { task->tk_status = 0; goto out_next; } task->tk_status = 0; switch (status) { case -ECONNREFUSED: case -ECONNRESET: /* A positive refusal suggests a rebind is needed. */ if (RPC_IS_SOFTCONN(task)) break; if (clnt->cl_autobind) { rpc_force_rebind(clnt); goto out_retry; } fallthrough; case -ECONNABORTED: case -ENETDOWN: case -ENETUNREACH: case -EHOSTUNREACH: case -EPIPE: case -EPROTO: xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, task->tk_rqstp->rq_connect_cookie); if (RPC_IS_SOFTCONN(task)) break; /* retry with existing socket, after a delay */ rpc_delay(task, 3*HZ); fallthrough; case -EADDRINUSE: case -ENOTCONN: case -EAGAIN: case -ETIMEDOUT: if (!(task->tk_flags & RPC_TASK_NO_ROUND_ROBIN) && (task->tk_flags & RPC_TASK_MOVEABLE) && test_bit(XPRT_REMOVE, &xprt->state)) { struct rpc_xprt *saved = task->tk_xprt; struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); if (xps->xps_nxprts > 1) { long value; xprt_release(task); value = atomic_long_dec_return(&xprt->queuelen); if (value == 0) rpc_xprt_switch_remove_xprt(xps, saved, true); xprt_put(saved); task->tk_xprt = NULL; task->tk_action = call_start; } xprt_switch_put(xps); if (!task->tk_xprt) goto out; } goto out_retry; case -ENOBUFS: rpc_delay(task, HZ >> 2); goto out_retry; } rpc_call_rpcerror(task, status); return; out_next: task->tk_action = call_transmit; return; out_retry: /* Check for timeouts before looping back to call_bind */ task->tk_action = call_bind; out: rpc_check_timeout(task); } /* * 5. Transmit the RPC request, and wait for reply */ static void call_transmit(struct rpc_task *task) { if (rpc_task_transmitted(task)) { rpc_task_handle_transmitted(task); return; } task->tk_action = call_transmit_status; if (!xprt_prepare_transmit(task)) return; task->tk_status = 0; if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { if (!xprt_connected(task->tk_xprt)) { task->tk_status = -ENOTCONN; return; } xprt_transmit(task); } xprt_end_transmit(task); } /* * 5a. Handle cleanup after a transmission */ static void call_transmit_status(struct rpc_task *task) { task->tk_action = call_status; /* * Common case: success. Force the compiler to put this * test first. */ if (rpc_task_transmitted(task)) { task->tk_status = 0; xprt_request_wait_receive(task); return; } switch (task->tk_status) { default: break; case -EBADMSG: task->tk_status = 0; task->tk_action = call_encode; break; /* * Special cases: if we've been waiting on the * socket's write_space() callback, or if the * socket just returned a connection error, * then hold onto the transport lock. 
*/ case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; case -EBADSLT: case -EAGAIN: task->tk_action = call_transmit; task->tk_status = 0; break; case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPERM: break; case -ECONNREFUSED: if (RPC_IS_SOFTCONN(task)) { if (!task->tk_msg.rpc_proc->p_proc) trace_xprt_ping(task->tk_xprt, task->tk_status); rpc_call_rpcerror(task, task->tk_status); return; } fallthrough; case -ECONNRESET: case -ECONNABORTED: case -EADDRINUSE: case -ENOTCONN: case -EPIPE: task->tk_action = call_bind; task->tk_status = 0; break; } rpc_check_timeout(task); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_transmit(struct rpc_task *task); static void call_bc_transmit_status(struct rpc_task *task); static void call_bc_encode(struct rpc_task *task) { xprt_request_enqueue_transmit(task); task->tk_action = call_bc_transmit; } /* * 5b. Send the backchannel RPC reply. On error, drop the reply. In * addition, disconnect on connectivity errors. */ static void call_bc_transmit(struct rpc_task *task) { task->tk_action = call_bc_transmit_status; if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) { if (!xprt_prepare_transmit(task)) return; task->tk_status = 0; xprt_transmit(task); } xprt_end_transmit(task); } static void call_bc_transmit_status(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; if (rpc_task_transmitted(task)) task->tk_status = 0; switch (task->tk_status) { case 0: /* Success */ case -ENETDOWN: case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -ECONNRESET: case -ECONNREFUSED: case -EADDRINUSE: case -ENOTCONN: case -EPIPE: break; case -ENOMEM: case -ENOBUFS: rpc_delay(task, HZ>>2); fallthrough; case -EBADSLT: case -EAGAIN: task->tk_status = 0; task->tk_action = call_bc_transmit; return; case -ETIMEDOUT: /* * Problem reaching the server. Disconnect and let the * forechannel reestablish the connection. The server will * have to retransmit the backchannel request and we'll * reprocess it. Since these ops are idempotent, there's no * need to cache our reply at this time. */ printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); break; default: /* * We were unable to reply and will have to drop the * request. The server should reconnect and retransmit. */ printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); break; } task->tk_action = rpc_exit_task; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /* * 6. Sort out the RPC call status */ static void call_status(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; int status; if (!task->tk_msg.rpc_proc->p_proc) trace_xprt_ping(task->tk_xprt, task->tk_status); status = task->tk_status; if (status >= 0) { task->tk_action = call_decode; return; } trace_rpc_call_status(task); task->tk_status = 0; switch(status) { case -EHOSTDOWN: case -ENETDOWN: case -EHOSTUNREACH: case -ENETUNREACH: case -EPERM: if (RPC_IS_SOFTCONN(task)) goto out_exit; /* * Delay any retries for 3 seconds, then handle as if it * were a timeout. 
*/ rpc_delay(task, 3*HZ); fallthrough; case -ETIMEDOUT: break; case -ECONNREFUSED: case -ECONNRESET: case -ECONNABORTED: case -ENOTCONN: rpc_force_rebind(clnt); break; case -EADDRINUSE: rpc_delay(task, 3*HZ); fallthrough; case -EPIPE: case -EAGAIN: break; case -ENFILE: case -ENOBUFS: case -ENOMEM: rpc_delay(task, HZ>>2); break; case -EIO: /* shutdown or soft timeout */ goto out_exit; default: if (clnt->cl_chatty) printk("%s: RPC call returned error %d\n", clnt->cl_program->name, -status); goto out_exit; } task->tk_action = call_encode; rpc_check_timeout(task); return; out_exit: rpc_call_rpcerror(task, status); } static bool rpc_check_connected(const struct rpc_rqst *req) { /* No allocated request or transport? Return true */ if (!req || !req->rq_xprt) return true; return xprt_connected(req->rq_xprt); } static void rpc_check_timeout(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; if (RPC_SIGNALLED(task)) return; if (xprt_adjust_timeout(task->tk_rqstp) == 0) return; trace_rpc_timeout_status(task); task->tk_timeouts++; if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) { rpc_call_rpcerror(task, -ETIMEDOUT); return; } if (RPC_IS_SOFT(task)) { /* * Once a "no retrans timeout" soft task (a.k.a. NFSv4) has * been sent, it should time out only if the transport * connection gets terminally broken. */ if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && rpc_check_connected(task->tk_rqstp)) return; if (clnt->cl_chatty) { pr_notice_ratelimited( "%s: server %s not responding, timed out\n", clnt->cl_program->name, task->tk_xprt->servername); } if (task->tk_flags & RPC_TASK_TIMEOUT) rpc_call_rpcerror(task, -ETIMEDOUT); else __rpc_call_rpcerror(task, -EIO, -ETIMEDOUT); return; } if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) { task->tk_flags |= RPC_CALL_MAJORSEEN; if (clnt->cl_chatty) { pr_notice_ratelimited( "%s: server %s not responding, still trying\n", clnt->cl_program->name, task->tk_xprt->servername); } } rpc_force_rebind(clnt); /* * Did our request time out due to an RPCSEC_GSS out-of-sequence * event? RFC2203 requires the server to drop all such requests. */ rpcauth_invalcred(task); } /* * 7. Decode the RPC reply */ static void call_decode(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct xdr_stream xdr; int err; if (!task->tk_msg.rpc_proc->p_decode) { task->tk_action = rpc_exit_task; return; } if (task->tk_flags & RPC_CALL_MAJORSEEN) { if (clnt->cl_chatty) { pr_notice_ratelimited("%s: server %s OK\n", clnt->cl_program->name, task->tk_xprt->servername); } task->tk_flags &= ~RPC_CALL_MAJORSEEN; } /* * Did we ever call xprt_complete_rqst()? If not, we should assume * the message is incomplete. */ err = -EAGAIN; if (!req->rq_reply_bytes_recvd) goto out; /* Ensure that we see all writes made by xprt_complete_rqst() * before it changed req->rq_reply_bytes_recvd.
*/ smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; trace_rpc_xdr_recvfrom(task, &req->rq_rcv_buf); /* Check that the softirq receive buffer is valid */ WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, sizeof(req->rq_rcv_buf)) != 0); xdr_init_decode(&xdr, &req->rq_rcv_buf, req->rq_rcv_buf.head[0].iov_base, req); err = rpc_decode_header(task, &xdr); out: switch (err) { case 0: task->tk_action = rpc_exit_task; task->tk_status = rpcauth_unwrap_resp(task, &xdr); xdr_finish_decode(&xdr); return; case -EAGAIN: task->tk_status = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); task->tk_action = call_encode; rpc_check_timeout(task); break; case -EKEYREJECTED: task->tk_action = call_reserve; rpc_check_timeout(task); rpcauth_invalcred(task); /* Ensure we obtain a new XID if we retry! */ xprt_release(task); } } static int rpc_encode_header(struct rpc_task *task, struct xdr_stream *xdr) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; __be32 *p; int error; error = -EMSGSIZE; p = xdr_reserve_space(xdr, RPC_CALLHDRSIZE << 2); if (!p) goto out_fail; *p++ = req->rq_xid; *p++ = rpc_call; *p++ = cpu_to_be32(RPC_VERSION); *p++ = cpu_to_be32(clnt->cl_prog); *p++ = cpu_to_be32(clnt->cl_vers); *p = cpu_to_be32(task->tk_msg.rpc_proc->p_proc); error = rpcauth_marshcred(task, xdr); if (error < 0) goto out_fail; return 0; out_fail: trace_rpc_bad_callhdr(task); rpc_call_rpcerror(task, error); return error; } static noinline int rpc_decode_header(struct rpc_task *task, struct xdr_stream *xdr) { struct rpc_clnt *clnt = task->tk_client; int error; __be32 *p; /* RFC-1014 says that the representation of XDR data must be a * multiple of four bytes * - if it isn't pointer subtraction in the NFS client may give * undefined results */ if (task->tk_rqstp->rq_rcv_buf.len & 3) goto out_unparsable; p = xdr_inline_decode(xdr, 3 * sizeof(*p)); if (!p) goto out_unparsable; p++; /* skip XID */ if (*p++ != rpc_reply) goto out_unparsable; if (*p++ != rpc_msg_accepted) goto out_msg_denied; error = rpcauth_checkverf(task, xdr); if (error) { struct rpc_cred *cred = task->tk_rqstp->rq_cred; if (!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) { rpcauth_invalcred(task); if (!task->tk_cred_retry) goto out_err; task->tk_cred_retry--; trace_rpc__stale_creds(task); return -EKEYREJECTED; } goto out_verifier; } p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p) { case rpc_success: return 0; case rpc_prog_unavail: trace_rpc__prog_unavail(task); error = -EPFNOSUPPORT; goto out_err; case rpc_prog_mismatch: trace_rpc__prog_mismatch(task); error = -EPROTONOSUPPORT; goto out_err; case rpc_proc_unavail: trace_rpc__proc_unavail(task); error = -EOPNOTSUPP; goto out_err; case rpc_garbage_args: case rpc_system_err: trace_rpc__garbage_args(task); error = -EIO; break; default: goto out_unparsable; } out_garbage: clnt->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; task->tk_action = call_encode; return -EAGAIN; } out_err: rpc_call_rpcerror(task, error); return error; out_unparsable: trace_rpc__unparsable(task); error = -EIO; goto out_garbage; out_verifier: trace_rpc_bad_verifier(task); switch (error) { case -EPROTONOSUPPORT: goto out_err; case -EACCES: /* Re-encode with a fresh cred */ fallthrough; default: goto out_garbage; } out_msg_denied: error = -EACCES; p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p++) { case rpc_auth_error: break; case rpc_mismatch: 
trace_rpc__mismatch(task); error = -EPROTONOSUPPORT; goto out_err; default: goto out_unparsable; } p = xdr_inline_decode(xdr, sizeof(*p)); if (!p) goto out_unparsable; switch (*p++) { case rpc_autherr_rejectedcred: case rpc_autherr_rejectedverf: case rpcsec_gsserr_credproblem: case rpcsec_gsserr_ctxproblem: rpcauth_invalcred(task); if (!task->tk_cred_retry) break; task->tk_cred_retry--; trace_rpc__stale_creds(task); return -EKEYREJECTED; case rpc_autherr_badcred: case rpc_autherr_badverf: /* possibly garbled cred/verf? */ if (!task->tk_garb_retry) break; task->tk_garb_retry--; trace_rpc__bad_creds(task); task->tk_action = call_encode; return -EAGAIN; case rpc_autherr_tooweak: trace_rpc__auth_tooweak(task); pr_warn("RPC: server %s requires stronger authentication.\n", task->tk_xprt->servername); break; default: goto out_unparsable; } goto out_err; } static void rpcproc_encode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, const void *obj) { } static int rpcproc_decode_null(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } static const struct rpc_procinfo rpcproc_null = { .p_encode = rpcproc_encode_null, .p_decode = rpcproc_decode_null, }; static const struct rpc_procinfo rpcproc_null_noreply = { .p_encode = rpcproc_encode_null, }; static void rpc_null_call_prepare(struct rpc_task *task, void *data) { task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT; rpc_call_start(task); } static const struct rpc_call_ops rpc_null_ops = { .rpc_call_prepare = rpc_null_call_prepare, .rpc_call_done = rpc_default_callback, }; static struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_cred *cred, int flags, const struct rpc_call_ops *ops, void *data) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_xprt = xprt, .rpc_message = &msg, .rpc_op_cred = cred, .callback_ops = ops ?: &rpc_null_ops, .callback_data = data, .flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, }; return rpc_run_task(&task_setup_data); } struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) { return rpc_call_null_helper(clnt, NULL, cred, flags, NULL, NULL); } EXPORT_SYMBOL_GPL(rpc_call_null); static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_task *task; int status; if (clnt->cl_auth->au_ops->ping) return clnt->cl_auth->au_ops->ping(clnt); task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } static int rpc_ping_noreply(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null_noreply, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = &msg, .callback_ops = &rpc_null_ops, .flags = RPC_TASK_SOFT | RPC_TASK_SOFTCONN | RPC_TASK_NULLCREDS, }; struct rpc_task *task; int status; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); return status; } struct rpc_cb_add_xprt_calldata { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; }; static void rpc_cb_add_xprt_done(struct rpc_task *task, void *calldata) { struct rpc_cb_add_xprt_calldata *data = calldata; if (task->tk_status == 0) rpc_xprt_switch_add_xprt(data->xps, data->xprt); } static void rpc_cb_add_xprt_release(void *calldata) { struct rpc_cb_add_xprt_calldata *data = calldata; xprt_put(data->xprt); xprt_switch_put(data->xps); kfree(data); } static const struct 
rpc_call_ops rpc_cb_add_xprt_call_ops = { .rpc_call_prepare = rpc_null_call_prepare, .rpc_call_done = rpc_cb_add_xprt_done, .rpc_release = rpc_cb_add_xprt_release, }; /** * rpc_clnt_test_and_add_xprt - Test and add a new transport to a rpc_clnt * @clnt: pointer to struct rpc_clnt * @xps: pointer to struct rpc_xprt_switch * @xprt: pointer to struct rpc_xprt * @in_max_connect: pointer to the max_connect value for the passed in xprt transport */ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, void *in_max_connect) { struct rpc_cb_add_xprt_calldata *data; struct rpc_task *task; int max_connect = clnt->cl_max_connect; if (in_max_connect) max_connect = *(int *)in_max_connect; if (xps->xps_nunique_destaddr_xprts + 1 > max_connect) { rcu_read_lock(); pr_warn("SUNRPC: reached max allowed number (%d), did not add " "transport to server: %s\n", max_connect, rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR)); rcu_read_unlock(); return -EINVAL; } data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->xps = xprt_switch_get(xps); data->xprt = xprt_get(xprt); if (rpc_xprt_switch_has_addr(data->xps, (struct sockaddr *)&xprt->addr)) { rpc_cb_add_xprt_release(data); goto success; } task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC, &rpc_cb_add_xprt_call_ops, data); if (IS_ERR(task)) return PTR_ERR(task); data->xps->xps_nunique_destaddr_xprts++; rpc_put_task(task); success: return 1; } EXPORT_SYMBOL_GPL(rpc_clnt_test_and_add_xprt); static int rpc_clnt_add_xprt_helper(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_add_xprt_test *data) { struct rpc_task *task; int status = -EADDRINUSE; /* Test the connection */ task = rpc_call_null_helper(clnt, xprt, NULL, 0, NULL, NULL); if (IS_ERR(task)) return PTR_ERR(task); status = task->tk_status; rpc_put_task(task); if (status < 0) return status; /* rpc_xprt_switch and rpc_xprt are dereferenced by add_xprt_test() */ data->add_xprt_test(clnt, xprt, data->data); return 0; } /** * rpc_clnt_setup_test_and_add_xprt() * * This is an rpc_clnt_add_xprt setup() function which returns 1 so: * 1) caller of the test function must dereference the rpc_xprt_switch * and the rpc_xprt. * 2) test function must call rpc_xprt_switch_add_xprt, usually in * the rpc_call_done routine.
* * Upon success (return of 1), the test function adds the new * transport to the rpc_clnt xprt switch * * @clnt: struct rpc_clnt to get the new transport * @xps: the rpc_xprt_switch to hold the new transport * @xprt: the rpc_xprt to test * @data: a struct rpc_add_xprt_test pointer that holds the test function * and test function call data */ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt_switch *xps, struct rpc_xprt *xprt, void *data) { int status = -EADDRINUSE; xprt = xprt_get(xprt); xprt_switch_get(xps); if (rpc_xprt_switch_has_addr(xps, (struct sockaddr *)&xprt->addr)) goto out_err; status = rpc_clnt_add_xprt_helper(clnt, xprt, data); if (status < 0) goto out_err; status = 1; out_err: xprt_put(xprt); xprt_switch_put(xps); if (status < 0) pr_info("RPC: rpc_clnt_test_xprt failed: %d addr %s not " "added\n", status, xprt->address_strings[RPC_DISPLAY_ADDR]); /* so that rpc_clnt_add_xprt does not call rpc_xprt_switch_add_xprt */ return status; } EXPORT_SYMBOL_GPL(rpc_clnt_setup_test_and_add_xprt); /** * rpc_clnt_add_xprt - Add a new transport to a rpc_clnt * @clnt: pointer to struct rpc_clnt * @xprtargs: pointer to struct xprt_create * @setup: callback to test and/or set up the connection * @data: pointer to setup function data * * Creates a new transport using the parameters set in args and * adds it to clnt. * If ping is set, then test that connectivity succeeds before * adding the new transport. * */ int rpc_clnt_add_xprt(struct rpc_clnt *clnt, struct xprt_create *xprtargs, int (*setup)(struct rpc_clnt *, struct rpc_xprt_switch *, struct rpc_xprt *, void *), void *data) { struct rpc_xprt_switch *xps; struct rpc_xprt *xprt; unsigned long connect_timeout; unsigned long reconnect_timeout; unsigned char resvport, reuseport; int ret = 0, ident; rcu_read_lock(); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); xprt = xprt_iter_xprt(&clnt->cl_xpi); if (xps == NULL || xprt == NULL) { rcu_read_unlock(); xprt_switch_put(xps); return -EAGAIN; } resvport = xprt->resvport; reuseport = xprt->reuseport; connect_timeout = xprt->connect_timeout; reconnect_timeout = xprt->max_reconnect_timeout; ident = xprt->xprt_class->ident; rcu_read_unlock(); if (!xprtargs->ident) xprtargs->ident = ident; xprtargs->xprtsec = clnt->cl_xprtsec; xprt = xprt_create_transport(xprtargs); if (IS_ERR(xprt)) { ret = PTR_ERR(xprt); goto out_put_switch; } xprt->resvport = resvport; xprt->reuseport = reuseport; if (xprtargs->connect_timeout) connect_timeout = xprtargs->connect_timeout; if (xprtargs->reconnect_timeout) reconnect_timeout = xprtargs->reconnect_timeout; if (xprt->ops->set_connect_timeout != NULL) xprt->ops->set_connect_timeout(xprt, connect_timeout, reconnect_timeout); rpc_xprt_switch_set_roundrobin(xps); if (setup) { ret = setup(clnt, xps, xprt, data); if (ret != 0) goto out_put_xprt; } rpc_xprt_switch_add_xprt(xps, xprt); out_put_xprt: xprt_put(xprt); out_put_switch: xprt_switch_put(xps); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_add_xprt); static int rpc_xprt_probe_trunked(struct rpc_clnt *clnt, struct rpc_xprt *xprt, struct rpc_add_xprt_test *data) { struct rpc_xprt *main_xprt; int status = 0; xprt_get(xprt); rcu_read_lock(); main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); status = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, (struct sockaddr *)&main_xprt->addr); rcu_read_unlock(); xprt_put(main_xprt); if (status || !test_bit(XPRT_OFFLINE, &xprt->state)) goto out; status = rpc_clnt_add_xprt_helper(clnt, xprt, data); out: xprt_put(xprt); return status; } /* 
rpc_clnt_probe_trunked_xprts -- probe offlined transports for session trunking * @clnt rpc_clnt structure * * For each offlined transport found in the rpc_clnt structure, call * the function rpc_xprt_probe_trunked() which will determine if this * transport still belongs to the trunking group. */ void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *clnt, struct rpc_add_xprt_test *data) { struct rpc_xprt_iter xpi; int ret; ret = rpc_clnt_xprt_iter_offline_init(clnt, &xpi); if (ret) return; for (;;) { struct rpc_xprt *xprt = xprt_iter_get_next(&xpi); if (!xprt) break; ret = rpc_xprt_probe_trunked(clnt, xprt, data); xprt_put(xprt); if (ret < 0) break; xprt_iter_rewind(&xpi); } xprt_iter_destroy(&xpi); } EXPORT_SYMBOL_GPL(rpc_clnt_probe_trunked_xprts); static int rpc_xprt_offline(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { struct rpc_xprt *main_xprt; struct rpc_xprt_switch *xps; int err = 0; xprt_get(xprt); rcu_read_lock(); main_xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); xps = xprt_switch_get(rcu_dereference(clnt->cl_xpi.xpi_xpswitch)); err = rpc_cmp_addr_port((struct sockaddr *)&xprt->addr, (struct sockaddr *)&main_xprt->addr); rcu_read_unlock(); xprt_put(main_xprt); if (err) goto out; if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) { err = -EINTR; goto out; } xprt_set_offline_locked(xprt, xps); xprt_release_write(xprt, NULL); out: xprt_put(xprt); xprt_switch_put(xps); return err; } /* rpc_clnt_manage_trunked_xprts -- offline trunked transports * @clnt rpc_clnt structure * * For each active transport found in the rpc_clnt structure, call * the function rpc_xprt_offline() which will identify trunked transports * and will mark them offline. */ void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *clnt) { rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_offline, NULL); } EXPORT_SYMBOL_GPL(rpc_clnt_manage_trunked_xprts); struct connect_timeout_data { unsigned long connect_timeout; unsigned long reconnect_timeout; }; static int rpc_xprt_set_connect_timeout(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *data) { struct connect_timeout_data *timeo = data; if (xprt->ops->set_connect_timeout) xprt->ops->set_connect_timeout(xprt, timeo->connect_timeout, timeo->reconnect_timeout); return 0; } void rpc_set_connect_timeout(struct rpc_clnt *clnt, unsigned long connect_timeout, unsigned long reconnect_timeout) { struct connect_timeout_data timeout = { .connect_timeout = connect_timeout, .reconnect_timeout = reconnect_timeout, }; rpc_clnt_iterate_for_each_xprt(clnt, rpc_xprt_set_connect_timeout, &timeout); } EXPORT_SYMBOL_GPL(rpc_set_connect_timeout); void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; xps = rpc_clnt_xprt_switch_get(clnt); xprt_set_online_locked(xprt, xps); xprt_switch_put(xps); } void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; if (rpc_clnt_xprt_switch_has_addr(clnt, (const struct sockaddr *)&xprt->addr)) { return rpc_clnt_xprt_set_online(clnt, xprt); } xps = rpc_clnt_xprt_switch_get(clnt); rpc_xprt_switch_add_xprt(xps, xprt); xprt_switch_put(xps); } EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_add_xprt); void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt) { struct rpc_xprt_switch *xps; rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); rpc_xprt_switch_remove_xprt(rcu_dereference(clnt->cl_xpi.xpi_xpswitch), xprt, 0); xps->xps_nunique_destaddr_xprts--; rcu_read_unlock(); }
EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_remove_xprt); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap) { struct rpc_xprt_switch *xps; bool ret; rcu_read_lock(); xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch); ret = rpc_xprt_switch_has_addr(xps, sap); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(rpc_clnt_xprt_switch_has_addr); #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static void rpc_show_header(void) { printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, rpc_task_timeout(task), task->tk_ops, clnt->cl_program->name, clnt->cl_vers, rpc_proc_name(task), task->tk_action, rpc_waitq); } void rpc_show_tasks(struct net *net) { struct rpc_clnt *clnt; struct rpc_task *task; int header = 0; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { rpc_show_header(); header++; } rpc_show_task(clnt, task); } spin_unlock(&clnt->cl_lock); } spin_unlock(&sn->rpc_client_lock); } #endif #if IS_ENABLED(CONFIG_SUNRPC_SWAP) static int rpc_clnt_swap_activate_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *dummy) { return xprt_enable_swap(xprt); } int rpc_clnt_swap_activate(struct rpc_clnt *clnt) { while (clnt != clnt->cl_parent) clnt = clnt->cl_parent; if (atomic_inc_return(&clnt->cl_swapper) == 1) return rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_activate_callback, NULL); return 0; } EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); static int rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *dummy) { xprt_disable_swap(xprt); return 0; } void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) { while (clnt != clnt->cl_parent) clnt = clnt->cl_parent; if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_swap_deactivate_callback, NULL); } EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); #endif /* CONFIG_SUNRPC_SWAP */
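/*
 * A minimal, self-contained user-space sketch of the tk_action pattern used
 * by the call_* routines above: each step does its work, then either points
 * tk_action at the next step or clears it to stop the loop. The demo_task
 * struct and the step_* names are hypothetical stand-ins for illustration
 * only; they are not part of the SUNRPC API.
 */
#include <stdio.h>

struct demo_task {
	void (*tk_action)(struct demo_task *);	/* next state to run */
	int tk_status;				/* result of the last step */
};

static void step_decode(struct demo_task *t)
{
	printf("decode: status %d\n", t->tk_status);
	t->tk_action = NULL;			/* analogous to rpc_exit_task */
}

static void step_transmit(struct demo_task *t)
{
	printf("transmit\n");
	t->tk_status = 0;			/* pretend the send succeeded */
	t->tk_action = step_decode;
}

static void step_encode(struct demo_task *t)
{
	printf("encode\n");
	t->tk_action = step_transmit;
}

int main(void)
{
	struct demo_task t = { .tk_action = step_encode };

	/* The scheduler loop: run one action at a time until done. */
	while (t.tk_action)
		t.tk_action(&t);
	return 0;
}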
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API. * * RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest. * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/byteorder.h> #include "ripemd.h" struct rmd160_ctx { u64 byte_count; u32 state[5]; __le32 buffer[16]; }; #define K1 RMD_K1 #define K2 RMD_K2 #define K3 RMD_K3 #define K4 RMD_K4 #define K5 RMD_K5 #define KK1 RMD_K6 #define KK2 RMD_K7 #define KK3 RMD_K8 #define KK4 RMD_K9 #define KK5 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ #define F3(x, y, z) ((x | ~y) ^ z) #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ?
x : y */ #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } static void rmd160_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee; /* Initialize left lane */ aa = state[0]; bb = state[1]; cc = state[2]; dd = state[3]; ee = state[4]; /* Initialize right lane */ aaa = state[0]; bbb = state[1]; ccc = state[2]; ddd = state[3]; eee = state[4]; /* round 1: left lane */ ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); /* round 2: left lane */ ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11); ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9); ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); /* round 3: left lane */ ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15); ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); /* round 4: left lane */ ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15); ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9); ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); ROUND(cc, dd, ee, aa, bb, F4, K4, in[2],
12); /* round 5: left lane */ ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6); /* round 1: right lane */ ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); /* round 2: right lane */ ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); /* round 3: right lane */ ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); /* round 4: right lane */ ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4,
in[1], 11); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); /* round 5: right lane */ ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); /* combine results */ ddd += cc + state[1]; /* final result for state[0] */ state[1] = state[2] + dd + eee; state[2] = state[3] + ee + aaa; state[3] = state[4] + aa + bbb; state[4] = state[0] + bb + ccc; state[0] = ddd; } static int rmd160_init(struct shash_desc *desc) { struct rmd160_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; rctx->state[0] = RMD_H0; rctx->state[1] = RMD_H1; rctx->state[2] = RMD_H2; rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; memset(rctx->buffer, 0, sizeof(rctx->buffer)); return 0; } static int rmd160_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct rmd160_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; /* Enough space in buffer? If so copy and we're done */ if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); rmd160_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); rmd160_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } memcpy(rctx->buffer, data, len); out: return 0; } /* Add padding and return the message digest. */ static int rmd160_final(struct shash_desc *desc, u8 *out) { struct rmd160_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? 
(56 - index) : ((64+56) - index); rmd160_update(desc, padding, padlen); /* Append length */ rmd160_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); return 0; } static struct shash_alg alg = { .digestsize = RMD160_DIGEST_SIZE, .init = rmd160_init, .update = rmd160_update, .final = rmd160_final, .descsize = sizeof(struct rmd160_ctx), .base = { .cra_name = "rmd160", .cra_driver_name = "rmd160-generic", .cra_blocksize = RMD160_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init rmd160_mod_init(void) { return crypto_register_shash(&alg); } static void __exit rmd160_mod_fini(void) { crypto_unregister_shash(&alg); } subsys_initcall(rmd160_mod_init); module_exit(rmd160_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); MODULE_DESCRIPTION("RIPEMD-160 Message Digest"); MODULE_ALIAS_CRYPTO("rmd160");
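/*
 * Standalone sketch (user space, not kernel code) of the Merkle-Damgard
 * padding arithmetic in rmd160_final() above: the message is padded with
 * 0x80 and zeroes so that byte_count becomes congruent to 56 mod 64,
 * leaving exactly 8 bytes for the little-endian bit count.
 * rmd160_padlen() is a hypothetical helper extracted here for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rmd160_padlen(uint64_t byte_count)
{
	uint32_t index = byte_count & 0x3f;	/* position within the block */

	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

int main(void)
{
	uint64_t lens[] = { 0, 1, 55, 56, 57, 63, 64, 119 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		uint32_t pad = rmd160_padlen(lens[i]);

		/* After padding, exactly 8 bytes remain to the boundary. */
		assert((lens[i] + pad) % 64 == 56);
		printf("len %3llu -> padlen %2u\n",
		       (unsigned long long)lens[i], pad);
	}
	return 0;
}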
/* SPDX-License-Identifier: GPL-2.0 */ /* * IRQ subsystem internal functions and variables: * * Do not ever include this file from anything other than * kernel/irq/. Do not even think about using any information outside * of this file for your non-core code. */ #include <linux/irqdesc.h> #include <linux/kernel_stat.h> #include <linux/pm_runtime.h> #include <linux/sched/clock.h> #ifdef CONFIG_SPARSE_IRQ # define MAX_SPARSE_IRQS INT_MAX #else # define MAX_SPARSE_IRQS NR_IRQS #endif #define istate core_internal_state__do_not_mess_with_it extern bool noirqdebug; extern struct irqaction chained_action; /* * Bits used by threaded handlers: * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed * IRQTF_AFFINITY - irq thread is requested to adjust affinity * IRQTF_FORCED_THREAD - irq action is force threaded * IRQTF_READY - signals that irq thread is ready */ enum { IRQTF_RUNTHREAD, IRQTF_WARNED, IRQTF_AFFINITY, IRQTF_FORCED_THREAD, IRQTF_READY, }; /* * Bit masks for desc->core_internal_state__do_not_mess_with_it * * IRQS_AUTODETECT - autodetection in progress * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt * detection * IRQS_POLL_INPROGRESS - polling in progress * IRQS_ONESHOT - irq is not unmasked in primary handler * IRQS_REPLAY - irq has been resent and will not be resent * again until the handler has run and cleared * this flag.
* IRQS_WAITING - irq is waiting * IRQS_PENDING - irq needs to be resent and should be resent * at the next available opportunity. * IRQS_SUSPENDED - irq is suspended * IRQS_NMI - irq line is used to deliver NMIs * IRQS_SYSFS - descriptor has been added to sysfs */ enum { IRQS_AUTODETECT = 0x00000001, IRQS_SPURIOUS_DISABLED = 0x00000002, IRQS_POLL_INPROGRESS = 0x00000008, IRQS_ONESHOT = 0x00000020, IRQS_REPLAY = 0x00000040, IRQS_WAITING = 0x00000080, IRQS_PENDING = 0x00000200, IRQS_SUSPENDED = 0x00000800, IRQS_TIMINGS = 0x00001000, IRQS_NMI = 0x00002000, IRQS_SYSFS = 0x00004000, }; #include "debug.h" #include "settings.h" extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags); extern void __disable_irq(struct irq_desc *desc); extern void __enable_irq(struct irq_desc *desc); #define IRQ_RESEND true #define IRQ_NORESEND false #define IRQ_START_FORCE true #define IRQ_START_COND false extern int irq_activate(struct irq_desc *desc); extern int irq_activate_and_startup(struct irq_desc *desc, bool resend); extern int irq_startup(struct irq_desc *desc, bool resend, bool force); extern void irq_shutdown(struct irq_desc *desc); extern void irq_shutdown_and_deactivate(struct irq_desc *desc); extern void irq_enable(struct irq_desc *desc); extern void irq_disable(struct irq_desc *desc); extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); extern void mask_irq(struct irq_desc *desc); extern void unmask_irq(struct irq_desc *desc); extern void unmask_threaded_irq(struct irq_desc *desc); extern unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask); #ifdef CONFIG_SPARSE_IRQ static inline void irq_mark_irq(unsigned int irq) { } #else extern void irq_mark_irq(unsigned int irq); #endif extern int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, bool *state); irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); irqreturn_t handle_irq_event(struct irq_desc *desc); /* Resending of interrupts: */ int check_irq_resend(struct irq_desc *desc, bool inject); void clear_irq_resend(struct irq_desc *desc); void irq_resend_init(struct irq_desc *desc); bool irq_wait_for_poll(struct irq_desc *desc); void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); void wake_threads_waitq(struct irq_desc *desc); #ifdef CONFIG_PROC_FS extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); extern void register_handler_proc(unsigned int irq, struct irqaction *action); extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); #else static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } static inline void register_handler_proc(unsigned int irq, struct irqaction *action) { } static inline void unregister_handler_proc(unsigned int irq, struct irqaction *action) { } #endif extern bool irq_can_set_affinity_usr(unsigned int irq); extern void irq_set_thread_affinity(struct irq_desc *desc); extern int irq_do_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force); #ifdef CONFIG_SMP extern int irq_setup_affinity(struct irq_desc *desc); #else static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; } #endif /* Inline functions for support of irq
chips on slow busses */ static inline void chip_bus_lock(struct irq_desc *desc) { if (unlikely(desc->irq_data.chip->irq_bus_lock)) desc->irq_data.chip->irq_bus_lock(&desc->irq_data); } static inline void chip_bus_sync_unlock(struct irq_desc *desc) { if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); } #define _IRQ_DESC_CHECK (1 << 0) #define _IRQ_DESC_PERCPU (1 << 1) #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) #define for_each_action_of_desc(desc, act) \ for (act = desc->action; act; act = act->next) struct irq_desc * __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, unsigned int check); void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); static inline struct irq_desc * irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check) { return __irq_get_desc_lock(irq, flags, true, check); } static inline void irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) { __irq_put_desc_unlock(desc, flags, true); } static inline struct irq_desc * irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check) { return __irq_get_desc_lock(irq, flags, false, check); } static inline void irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) { __irq_put_desc_unlock(desc, flags, false); } #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) static inline unsigned int irqd_get(struct irq_data *d) { return __irqd_to_state(d); } /* * Manipulation functions for irq_data.state */ static inline void irqd_set_move_pending(struct irq_data *d) { __irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING; } static inline void irqd_clr_move_pending(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING; } static inline void irqd_set_managed_shutdown(struct irq_data *d) { __irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN; } static inline void irqd_clr_managed_shutdown(struct irq_data *d) { __irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN; } static inline void irqd_clear(struct irq_data *d, unsigned int mask) { __irqd_to_state(d) &= ~mask; } static inline void irqd_set(struct irq_data *d, unsigned int mask) { __irqd_to_state(d) |= mask; } static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) { return __irqd_to_state(d) & mask; } static inline void irq_state_set_disabled(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); } static inline void irq_state_set_masked(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); } #undef __irqd_to_state static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc) { __this_cpu_inc(desc->kstat_irqs->cnt); __this_cpu_inc(kstat.irqs_sum); } static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) { __kstat_incr_irqs_this_cpu(desc); desc->tot_count++; } static inline int irq_desc_get_node(struct irq_desc *desc) { return irq_common_data_get_node(&desc->irq_common_data); } static inline int irq_desc_is_chained(struct irq_desc *desc) { return (desc->action && desc->action == &chained_action); } static inline bool irq_is_nmi(struct irq_desc *desc) { return desc->istate & IRQS_NMI; } #ifdef CONFIG_PM_SLEEP bool irq_pm_check_wakeup(struct irq_desc *desc); void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action); void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action); #else static inline bool irq_pm_check_wakeup(struct irq_desc 
*desc) { return false; } static inline void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { } static inline void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { } #endif #ifdef CONFIG_IRQ_TIMINGS #define IRQ_TIMINGS_SHIFT 5 #define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT) #define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1) /** * struct irq_timings - irq timings storing structure * @values: a circular buffer of u64 encoded <timestamp,irq> values * @count: the number of elements in the array */ struct irq_timings { u64 values[IRQ_TIMINGS_SIZE]; int count; }; DECLARE_PER_CPU(struct irq_timings, irq_timings); extern void irq_timings_free(int irq); extern int irq_timings_alloc(int irq); static inline void irq_remove_timings(struct irq_desc *desc) { desc->istate &= ~IRQS_TIMINGS; irq_timings_free(irq_desc_get_irq(desc)); } static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act) { int irq = irq_desc_get_irq(desc); int ret; /* * We don't need the measurement because the idle code already * knows the next expiry event. */ if (act->flags & __IRQF_TIMER) return; /* * If the timing allocation fails, we just want to warn, * not fail, so let the system boot anyway. */ ret = irq_timings_alloc(irq); if (ret) { pr_warn("Failed to allocate irq timing stats for irq%d (%d)\n", irq, ret); return; } desc->istate |= IRQS_TIMINGS; } extern void irq_timings_enable(void); extern void irq_timings_disable(void); DECLARE_STATIC_KEY_FALSE(irq_timing_enabled); /* * The interrupt number and the timestamp are encoded into a single * u64 variable to optimize the size. * A 48-bit timestamp and a 16-bit IRQ number are more than sufficient. * Who cares about an IRQ after 78 hours of idle time? */ static inline u64 irq_timing_encode(u64 timestamp, int irq) { return (timestamp << 16) | irq; } static inline int irq_timing_decode(u64 value, u64 *timestamp) { *timestamp = value >> 16; return value & U16_MAX; } static __always_inline void irq_timings_push(u64 ts, int irq) { struct irq_timings *timings = this_cpu_ptr(&irq_timings); timings->values[timings->count & IRQ_TIMINGS_MASK] = irq_timing_encode(ts, irq); timings->count++; } /* * The function record_irq_time is only called in one place in the * interrupt handler. We want this function always inline so the code * inside is embedded in the function and the static key branching * code can act at the higher level. Without the explicit * __always_inline we can end up with a function call and a small * overhead in the hotpath for nothing.
*/ static __always_inline void record_irq_time(struct irq_desc *desc) { if (!static_branch_likely(&irq_timing_enabled)) return; if (desc->istate & IRQS_TIMINGS) irq_timings_push(local_clock(), irq_desc_get_irq(desc)); } #else static inline void irq_remove_timings(struct irq_desc *desc) {} static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act) {} static inline void record_irq_time(struct irq_desc *desc) {} #endif /* CONFIG_IRQ_TIMINGS */ #ifdef CONFIG_GENERIC_IRQ_CHIP void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name, int num_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); #else static inline void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name, int num_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler) { } #endif /* CONFIG_GENERIC_IRQ_CHIP */ #ifdef CONFIG_GENERIC_PENDING_IRQ static inline bool irq_can_move_pcntxt(struct irq_data *data) { return irqd_can_move_in_process_context(data); } static inline bool irq_move_pending(struct irq_data *data) { return irqd_is_setaffinity_pending(data); } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { cpumask_copy(desc->pending_mask, mask); } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { cpumask_copy(mask, desc->pending_mask); } static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } static inline bool handle_enforce_irqctx(struct irq_data *data) { return irqd_is_handle_enforce_irqctx(data); } bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); #else /* CONFIG_GENERIC_PENDING_IRQ */ static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } static inline bool irq_move_pending(struct irq_data *data) { return false; } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } static inline void irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return NULL; } static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) { return false; } static inline bool handle_enforce_irqctx(struct irq_data *data) { return false; } #endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve) { irqd_set_activated(data); return 0; } static inline void irq_domain_deactivate_irq(struct irq_data *data) { irqd_clr_activated(data); } #endif static inline struct irq_data *irqd_get_parent_data(struct irq_data *irqd) { #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY return irqd->parent_data; #else return NULL; #endif } #ifdef CONFIG_GENERIC_IRQ_DEBUGFS #include <linux/debugfs.h> struct irq_bit_descr { unsigned int mask; char *name; }; #define BIT_MASK_DESCR(m) { .mask = m, .name = #m } void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state, const struct irq_bit_descr *sd, int size); void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc); static inline void irq_remove_debugfs_entry(struct irq_desc *desc) { debugfs_remove(desc->debugfs_file); kfree(desc->dev_name); } void irq_debugfs_copy_devname(int irq, struct device *dev); # ifdef CONFIG_IRQ_DOMAIN void irq_domain_debugfs_init(struct dentry *root); # else static inline void irq_domain_debugfs_init(struct dentry *root) { } # endif #else /*
CONFIG_GENERIC_IRQ_DEBUGFS */ static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d) { } static inline void irq_remove_debugfs_entry(struct irq_desc *d) { } static inline void irq_debugfs_copy_devname(int irq, struct device *dev) { } #endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
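The encoding above packs a 48-bit timestamp and a 16-bit IRQ number into one u64, and the power-of-two buffer size lets the push path index with a cheap mask instead of a modulo. Below is a minimal standalone sketch of that scheme, using uint64_t in place of the kernel's u64; it is an illustration compiled as an ordinary user-space program, not kernel code.

/* Sketch of the <timestamp,irq> encoding and the masked circular-buffer
 * indexing used by irq_timings_push() above. */
#include <stdint.h>
#include <stdio.h>

#define IRQ_TIMINGS_SHIFT 5
#define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT)
#define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1)

static uint64_t irq_timing_encode(uint64_t timestamp, int irq)
{
	return (timestamp << 16) | irq;
}

static int irq_timing_decode(uint64_t value, uint64_t *timestamp)
{
	*timestamp = value >> 16;
	return value & 0xffff;
}

int main(void)
{
	uint64_t values[IRQ_TIMINGS_SIZE];
	int count = 0;

	/* Push 40 events into the 32-slot ring: the oldest 8 get overwritten. */
	for (int i = 0; i < 40; i++)
		values[count++ & IRQ_TIMINGS_MASK] =
			irq_timing_encode(1000 + i, 16 + (i & 3));

	uint64_t ts;
	int irq = irq_timing_decode(values[(count - 1) & IRQ_TIMINGS_MASK], &ts);
	printf("last event: irq=%d ts=%llu\n", irq, (unsigned long long)ts);
	return 0;
}

Because count only ever increments, count & IRQ_TIMINGS_MASK always points at the next slot to overwrite, which is exactly the behavior the kernel structure relies on.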
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PTRACE_H #define _LINUX_PTRACE_H #include <linux/compiler.h> /* For unlikely. */ #include <linux/sched.h> /* For struct task_struct. */ #include <linux/sched/signal.h> /* For send_sig(), same_thread_group(), etc. */ #include <linux/err.h> /* for IS_ERR_VALUE */ #include <linux/bug.h> /* For BUG_ON. */ #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ #include <uapi/linux/ptrace.h> #include <linux/seccomp.h> /* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */ struct syscall_info { __u64 sp; struct seccomp_data data; }; extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); /* * Ptrace flags * * The ownership rules for task->ptrace, which holds the ptrace * flags, are simple. When a task is running, it owns its task->ptrace * flags. When a task is stopped, the ptracer owns task->ptrace.
*/ #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 #define PT_OPT_FLAG_SHIFT 3 /* PT_TRACE_* event enable flags */ #define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event))) #define PT_TRACESYSGOOD PT_EVENT_FLAG(0) #define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK) #define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK) #define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE) #define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC) #define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE) #define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT) #define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) extern long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern int ptrace_notify(int exit_code, unsigned long message); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, const struct cred *ptracer_cred); extern void __ptrace_unlink(struct task_struct *child); extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 #define PTRACE_MODE_FSCREDS 0x08 #define PTRACE_MODE_REALCREDS 0x10 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) /** * ptrace_may_access - check whether the caller is permitted to access * a target task. * @task: target task * @mode: selects type of access and caller credentials * * Returns true on success, false on denial. * * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must * be set in @mode to specify whether the access was requested through * a filesystem syscall (should use effective capabilities and fsuid * of the caller) or through an explicit syscall such as * process_vm_writev or ptrace (and should use the real credentials). */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); } static inline void ptrace_unlink(struct task_struct *child) { if (unlikely(child->ptrace)) __ptrace_unlink(child); } int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data); int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data); /** * ptrace_parent - return the task that is tracing the given task * @task: task to consider * * Returns %NULL if no one is tracing @task, or the &struct task_struct * pointer to its tracer. * * Must be called under rcu_read_lock(). The pointer returned might be kept * alive only by RCU.
During exec, this may be called with task_lock() held * on @task, still held from when check_unsafe_exec() was called. */ static inline struct task_struct *ptrace_parent(struct task_struct *task) { if (unlikely(task->ptrace)) return rcu_dereference(task->parent); return NULL; } /** * ptrace_event_enabled - test whether a ptrace event is enabled * @task: ptracee of interest * @event: %PTRACE_EVENT_* to test * * Test whether @event is enabled for ptracee @task. * * Returns %true if @event is enabled, %false otherwise. */ static inline bool ptrace_event_enabled(struct task_struct *task, int event) { return task->ptrace & PT_EVENT_FLAG(event); } /** * ptrace_event - possibly stop for a ptrace event notification * @event: %PTRACE_EVENT_* value to report * @message: value for %PTRACE_GETEVENTMSG to return * * Check whether @event is enabled and, if so, report @event and @message * to the ptrace parent. * * Called without locks. */ static inline void ptrace_event(int event, unsigned long message) { if (unlikely(ptrace_event_enabled(current, event))) { ptrace_notify((event << 8) | SIGTRAP, message); } else if (event == PTRACE_EVENT_EXEC) { /* legacy EXEC report via SIGTRAP */ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) send_sig(SIGTRAP, current, 0); } } /** * ptrace_event_pid - possibly stop for a ptrace event notification * @event: %PTRACE_EVENT_* value to report * @pid: process identifier for %PTRACE_GETEVENTMSG to return * * Check whether @event is enabled and, if so, report @event and @pid * to the ptrace parent. @pid is reported as the pid_t seen from the * ptrace parent's pid namespace. * * Called without locks. */ static inline void ptrace_event_pid(int event, struct pid *pid) { /* * FIXME: There's a potential race if a ptracer in a different pid * namespace than parent attaches between computing message below and * when we acquire tasklist_lock in ptrace_stop(). If this happens, * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. */ unsigned long message = 0; struct pid_namespace *ns; rcu_read_lock(); ns = task_active_pid_ns(rcu_dereference(current->parent)); if (ns) message = pid_nr_ns(pid, ns); rcu_read_unlock(); ptrace_event(event, message); } /** * ptrace_init_task - initialize ptrace state for a new child * @child: new child task * @ptrace: true if child should be ptrace'd by parent's tracer * * This is called immediately after adding @child to its parent's children * list. @ptrace is false in the normal case, and true to ptrace @child. * * Called with current's siglock and write_lock_irq(&tasklist_lock) held. */ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) { INIT_LIST_HEAD(&child->ptrace_entry); INIT_LIST_HEAD(&child->ptraced); child->jobctl = 0; child->ptrace = 0; child->parent = child->real_parent; if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; __ptrace_link(child, current->parent, current->ptracer_cred); if (child->ptrace & PT_SEIZED) task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); else sigaddset(&child->pending.signal, SIGSTOP); } else child->ptracer_cred = NULL; } /** * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped * @task: task in %EXIT_DEAD state * * Called with write_lock(&tasklist_lock) held. 
*/ static inline void ptrace_release_task(struct task_struct *task) { BUG_ON(!list_empty(&task->ptraced)); ptrace_unlink(task); BUG_ON(!list_empty(&task->ptrace_entry)); } #ifndef force_successful_syscall_return /* * System call handlers that, upon successful completion, need to return a * negative value should call force_successful_syscall_return() right before * returning. On architectures where the syscall convention provides for a * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly * others), this macro can be used to ensure that the error flag will not get * set. On architectures which do not support a separate error flag, the macro * is a no-op and the spurious error condition needs to be filtered out by some * other means (e.g., in user-level, by passing an extra argument to the * syscall handler, or something along those lines). */ #define force_successful_syscall_return() do { } while (0) #endif #ifndef is_syscall_success /* * On most systems we can tell if a syscall is a success based on whether the * retval is an error value. Some systems, like ia64 and powerpc, have * different indicators of success/failure and must define their own. */ #define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs)))) #endif /* * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__. * * These do-nothing inlines are used when the arch does not * implement single-step. The kerneldoc comments are here * to document the interface for all arch definitions. */ #ifndef arch_has_single_step /** * arch_has_single_step - does this CPU support user-mode single-step? * * If this is defined, then there must be function declarations or * inlines for user_enable_single_step() and user_disable_single_step(). * arch_has_single_step() should evaluate to nonzero iff the machine * supports instruction single-step for user mode. * It can be a constant or it can test a CPU feature bit. */ #define arch_has_single_step() (0) /** * user_enable_single_step - single-step in user-mode task * @task: either current or a task stopped in %TASK_TRACED * * This can only be called when arch_has_single_step() has returned nonzero. * Set @task so that when it returns to user mode, it will trap after the * next single instruction executes. If arch_has_block_step() is defined, * this must clear the effects of user_enable_block_step() too. */ static inline void user_enable_single_step(struct task_struct *task) { BUG(); /* This can never be called. */ } /** * user_disable_single_step - cancel user-mode single-step * @task: either current or a task stopped in %TASK_TRACED * * Clear @task of the effects of user_enable_single_step() and * user_enable_block_step(). This can be called whether or not either * of those was ever called on @task, and even if arch_has_single_step() * returned zero. */ static inline void user_disable_single_step(struct task_struct *task) { } #else extern void user_enable_single_step(struct task_struct *); extern void user_disable_single_step(struct task_struct *); #endif /* arch_has_single_step */ #ifndef arch_has_block_step /** * arch_has_block_step - does this CPU support user-mode block-step? * * If this is defined, then there must be a function declaration or inline * for user_enable_block_step(), and arch_has_single_step() must be defined * too. arch_has_block_step() should evaluate to nonzero iff the machine * supports step-until-branch for user mode. It can be a constant or it * can test a CPU feature bit.
*/ #define arch_has_block_step() (0) /** * user_enable_block_step - step until branch in user-mode task * @task: either current or a task stopped in %TASK_TRACED * * This can only be called when arch_has_block_step() has returned nonzero, * and will never be called when single-instruction stepping is being used. * Set @task so that when it returns to user mode, it will trap after the * next branch or trap taken. */ static inline void user_enable_block_step(struct task_struct *task) { BUG(); /* This can never be called. */ } #else extern void user_enable_block_step(struct task_struct *); #endif /* arch_has_block_step */ #ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT extern void user_single_step_report(struct pt_regs *regs); #else static inline void user_single_step_report(struct pt_regs *regs) { kernel_siginfo_t info; clear_siginfo(&info); info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = 0; info.si_uid = 0; force_sig_info(&info); } #endif #ifndef arch_ptrace_stop_needed /** * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called * * This is called with the siglock held, to decide whether or not it's * necessary to release the siglock and call arch_ptrace_stop(). It can be * defined to a constant if arch_ptrace_stop() is never required, or always * is. On machines where this makes sense, it should be defined to a quick * test to optimize out calling arch_ptrace_stop() when it would be * superfluous. For example, if the thread has not been back to user mode * since the last stop, the thread state might indicate that nothing needs * to be done. * * This is guaranteed to be invoked once before a task stops for ptrace and * may include arch-specific operations necessary prior to a ptrace stop. */ #define arch_ptrace_stop_needed() (0) #endif #ifndef arch_ptrace_stop /** * arch_ptrace_stop - Do machine-specific work before stopping for ptrace * * This is called with no locks held when arch_ptrace_stop_needed() has * just returned nonzero. It is allowed to block, e.g. for user memory * access. The arch can have machine-specific work to be done before * ptrace stops. On ia64, register backing store gets written back to user * memory here. Since this can be costly (requires dropping the siglock), * we only do it when the arch requires it for this particular stop, as * indicated by arch_ptrace_stop_needed(). */ #define arch_ptrace_stop() do { } while (0) #endif #ifndef current_pt_regs #define current_pt_regs() task_pt_regs(current) #endif #ifndef current_user_stack_pointer #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) #endif #ifndef exception_ip #define exception_ip(x) instruction_pointer(x) #endif extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); /* * ptrace report for syscall entry and exit looks identical. */ static inline int ptrace_report_syscall(unsigned long message) { int ptrace = current->ptrace; int signr; if (!(ptrace & PT_PTRACED)) return 0; signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), message); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. 
-brl */ if (signr) send_sig(signr, current, 1); return fatal_signal_pending(current); } /** * ptrace_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just * entered the kernel for a system call. Full user register state is * available here. Changing the values in @regs can affect the system * call number and arguments to be tried. It is safe to block here, * preventing the system call from beginning. * * Returns zero normally, or nonzero if the calling arch code should abort * the system call. That must prevent normal entry so no system call is * made. If @task ever returns to user mode after this, its register state * is unspecified, but should be something harmless like an %ENOSYS error * return. It should preserve enough information so that syscall_rollback() * can work (see asm-generic/syscall.h). * * Called without locks, just after entering kernel mode. */ static inline __must_check int ptrace_report_syscall_entry( struct pt_regs *regs) { return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY); } /** * ptrace_report_syscall_exit - task has just finished a system call * @regs: user register state of current task * @step: nonzero if simulating single-step or block-step * * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when * the current task has just finished an attempted system call. Full * user register state is available here. It is safe to block here, * preventing signals from being processed. * * If @step is nonzero, this report is also in lieu of the normal * trap that would follow the system call instruction because * user_enable_block_step() or user_enable_single_step() was used. * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set. * * Called without locks, just before checking for pending signals. */ static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step) { if (step) user_single_step_report(regs); else ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT); } #endif
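ptrace_event() above reports an enabled event by packing it into the stop signal as (event << 8) | SIGTRAP, and ptrace_event_pid() stores the new pid as the event message. That is exactly what a tracer observes through waitpid() and PTRACE_GETEVENTMSG after enabling the corresponding PTRACE_O_* option. A hedged user-space sketch of the tracer side follows; error handling is trimmed, and PTRACE_EVENT_FORK / PTRACE_O_TRACEFORK are assumed to come from the ptrace UAPI headers exposed by libc.

/* Tracer-side sketch of the (event << 8) | SIGTRAP wait-status encoding.
 * Illustration only, not a robust tracer. */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {                        /* tracee */
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		raise(SIGSTOP);                  /* let the tracer set options */
		fork();                          /* triggers PTRACE_EVENT_FORK */
		_exit(0);
	}

	int status;
	waitpid(child, &status, 0);                       /* initial SIGSTOP */
	ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACEFORK);
	ptrace(PTRACE_CONT, child, 0, 0);

	waitpid(child, &status, 0);                       /* event stop */
	if (WIFSTOPPED(status) &&
	    status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))) {
		unsigned long new_pid;

		/* The message set up by ptrace_event_pid() in the header above. */
		ptrace(PTRACE_GETEVENTMSG, child, 0, &new_pid);
		printf("PTRACE_EVENT_FORK, new child %lu\n", new_pid);
	}

	ptrace(PTRACE_CONT, child, 0, 0);                 /* run to exit */
	waitpid(child, &status, 0);
	return 0;          /* the forked grandchild is left untended here */
}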
// SPDX-License-Identifier: GPL-2.0-or-later /* * locks.c * * Userspace file locking support * * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/fcntl.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "dlmglue.h" #include "file.h" #include "inode.h" #include "locks.h" static int ocfs2_do_flock(struct file *file, struct inode *inode, int cmd, struct file_lock *fl) { int ret = 0, level = 0, trylock = 0; struct ocfs2_file_private *fp = file->private_data; struct ocfs2_lock_res *lockres = &fp->fp_flock; if (lock_is_write(fl)) level = 1; if (!IS_SETLKW(cmd)) trylock = 1; mutex_lock(&fp->fp_mutex); if (lockres->l_flags & OCFS2_LOCK_ATTACHED && lockres->l_level > LKM_NLMODE) { int old_level = 0; struct file_lock request; if (lockres->l_level == LKM_EXMODE) old_level = 1; if (level == old_level) goto out; /* * Converting an existing lock is not guaranteed to be * atomic, so we can get away with simply unlocking * here and allowing the lock code to try at the new * level. */ locks_init_lock(&request); request.c.flc_type = F_UNLCK; request.c.flc_flags = FL_FLOCK; locks_lock_file_wait(file, &request); ocfs2_file_unlock(file); } ret = ocfs2_file_lock(file, level, trylock); if (ret) { if (ret == -EAGAIN && trylock) ret = -EWOULDBLOCK; else mlog_errno(ret); goto out; } ret = locks_lock_file_wait(file, fl); if (ret) ocfs2_file_unlock(file); out: mutex_unlock(&fp->fp_mutex); return ret; } static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl) { int ret; struct ocfs2_file_private *fp = file->private_data; mutex_lock(&fp->fp_mutex); ocfs2_file_unlock(file); ret = locks_lock_file_wait(file, fl); mutex_unlock(&fp->fp_mutex); return ret; } /* * Overall flow of ocfs2_flock() was influenced by gfs2_flock(). */ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) { struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK; if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) || ocfs2_mount_local(osb)) return locks_lock_file_wait(file, fl); if (lock_is_unlock(fl)) return ocfs2_do_funlock(file, cmd, fl); else return ocfs2_do_flock(file, inode, cmd, fl); } int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) { struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!(fl->c.flc_flags & FL_POSIX)) return -ENOLCK; return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); }
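Because the cluster-lock conversion in ocfs2_do_flock() is not atomic, an flock() level change is implemented as unlock-then-relock at the new level. The user-space sketch below shows the call pattern that exercises that path; the file name is hypothetical and it assumes the file lives on an ocfs2 mount without the localflocks mount option.

/* Sketch of the flock() sequence serviced by ocfs2_flock() above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);   /* hypothetical path */
	if (fd < 0)
		return 1;

	flock(fd, LOCK_SH);          /* cluster lock taken at the read level */
	flock(fd, LOCK_EX);          /* upgrade: ocfs2 unlocks, then relocks */
	if (flock(fd, LOCK_UN) == 0) /* handled by ocfs2_do_funlock() */
		printf("lock cycle complete\n");

	close(fd);
	return 0;
}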
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_KSM_H #define __LINUX_KSM_H /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork(). */ #include <linux/bitops.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); void ksm_add_vma(struct vm_area_struct *vma); int ksm_enable_merge_any(struct mm_struct *mm); int ksm_disable_merge_any(struct mm_struct *mm); int ksm_disable(struct mm_struct *mm); int __ksm_enter(struct mm_struct *mm); void __ksm_exit(struct mm_struct *mm); /* * To identify zeropages that were mapped by KSM, we reuse the dirty bit * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when * deduplicating memory. */ #define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte)) extern atomic_long_t ksm_zero_pages; static inline void ksm_map_zero_page(struct mm_struct *mm) { atomic_long_inc(&ksm_zero_pages); atomic_long_inc(&mm->ksm_zero_pages); } static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) { if (is_ksm_zero_pte(pte)) { atomic_long_dec(&ksm_zero_pages); atomic_long_dec(&mm->ksm_zero_pages); } } static inline long mm_ksm_zero_pages(struct mm_struct *mm) { return atomic_long_read(&mm->ksm_zero_pages); } static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { /* Adding mm to ksm is best effort on fork. */ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) __ksm_enter(mm); } static inline int ksm_execve(struct mm_struct *mm) { if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return __ksm_enter(mm); return 0; } static inline void ksm_exit(struct mm_struct *mm) { if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) __ksm_exit(mm); } /* * When do_swap_page() first faults in from swap what used to be a KSM page, * no problem, it will be assigned to this vma's anon_vma; but thereafter, * it might be faulted into a different anon_vma (or perhaps to a different * offset in the same anon_vma). do_swap_page() cannot do all the locking * needed to reconstitute a cross-anon_vma KSM page: for now it has to make * a copy, and leave remerging the pages to a later pass of ksmd. * * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out?
*/ struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr); void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early); long ksm_process_profit(struct mm_struct *); #else /* !CONFIG_KSM */ static inline void ksm_add_vma(struct vm_area_struct *vma) { } static inline int ksm_disable(struct mm_struct *mm) { return 0; } static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { } static inline int ksm_execve(struct mm_struct *mm) { return 0; } static inline void ksm_exit(struct mm_struct *mm) { } static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) { } static inline void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early) { } #ifdef CONFIG_MMU static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { return 0; } static inline struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) { return folio; } static inline void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) { } static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old) { } #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ #endif /* __LINUX_KSM_H */
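The user-space entry point into ksm_madvise() above is madvise(MADV_MERGEABLE), which sets VM_MERGEABLE on the region and eventually MMF_VM_MERGEABLE on the mm that ksm_fork()/ksm_exit() test. A sketch of opting an anonymous region into KSM follows; it assumes a CONFIG_KSM kernel with ksmd started (echo 1 > /sys/kernel/mm/ksm/run).

/* Sketch: mark identical anonymous pages as merge candidates for ksmd. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);          /* identical pages: merge candidates */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	pause();                         /* give ksmd time to scan and merge */
	return 0;
}

While the program sleeps, merge progress can be observed in /sys/kernel/mm/ksm/pages_sharing and friends.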
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/sysv/itree.c * * Handling of indirect blocks' trees.
* AV, Sep--Dec 2000 */ #include <linux/buffer_head.h> #include <linux/mount.h> #include <linux/mpage.h> #include <linux/string.h> #include "sysv.h" enum {DIRECT = 10, DEPTH = 4}; /* Have triple indirect */ static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode) { mark_buffer_dirty_inode(bh, inode); if (IS_SYNC(inode)) sync_dirty_buffer(bh); } static int block_to_path(struct inode *inode, long block, int offsets[DEPTH]) { struct super_block *sb = inode->i_sb; struct sysv_sb_info *sbi = SYSV_SB(sb); int ptrs_bits = sbi->s_ind_per_block_bits; unsigned long indirect_blocks = sbi->s_ind_per_block, double_blocks = sbi->s_ind_per_block_2; int n = 0; if (block < 0) { printk("sysv_block_map: block < 0\n"); } else if (block < DIRECT) { offsets[n++] = block; } else if ( (block -= DIRECT) < indirect_blocks) { offsets[n++] = DIRECT; offsets[n++] = block; } else if ((block -= indirect_blocks) < double_blocks) { offsets[n++] = DIRECT+1; offsets[n++] = block >> ptrs_bits; offsets[n++] = block & (indirect_blocks - 1); } else if (((block -= double_blocks) >> (ptrs_bits * 2)) < indirect_blocks) { offsets[n++] = DIRECT+2; offsets[n++] = block >> (ptrs_bits * 2); offsets[n++] = (block >> ptrs_bits) & (indirect_blocks - 1); offsets[n++] = block & (indirect_blocks - 1); } else { /* nothing */; } return n; } static inline int block_to_cpu(struct sysv_sb_info *sbi, sysv_zone_t nr) { return sbi->s_block_base + fs32_to_cpu(sbi, nr); } typedef struct { sysv_zone_t *p; sysv_zone_t key; struct buffer_head *bh; } Indirect; static DEFINE_RWLOCK(pointers_lock); static inline void add_chain(Indirect *p, struct buffer_head *bh, sysv_zone_t *v) { p->key = *(p->p = v); p->bh = bh; } static inline int verify_chain(Indirect *from, Indirect *to) { while (from <= to && from->key == *from->p) from++; return (from > to); } static inline sysv_zone_t *block_end(struct buffer_head *bh) { return (sysv_zone_t*)((char*)bh->b_data + bh->b_size); } static Indirect *get_branch(struct inode *inode, int depth, int offsets[], Indirect chain[], int *err) { struct super_block *sb = inode->i_sb; Indirect *p = chain; struct buffer_head *bh; *err = 0; add_chain(chain, NULL, SYSV_I(inode)->i_data + *offsets); if (!p->key) goto no_block; while (--depth) { int block = block_to_cpu(SYSV_SB(sb), p->key); bh = sb_bread(sb, block); if (!bh) goto failure; read_lock(&pointers_lock); if (!verify_chain(chain, p)) goto changed; add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets); read_unlock(&pointers_lock); if (!p->key) goto no_block; } return NULL; changed: read_unlock(&pointers_lock); brelse(bh); *err = -EAGAIN; goto no_block; failure: *err = -EIO; no_block: return p; } static int alloc_branch(struct inode *inode, int num, int *offsets, Indirect *branch) { int blocksize = inode->i_sb->s_blocksize; int n = 0; int i; branch[0].key = sysv_new_block(inode->i_sb); if (branch[0].key) for (n = 1; n < num; n++) { struct buffer_head *bh; int parent; /* Allocate the next block */ branch[n].key = sysv_new_block(inode->i_sb); if (!branch[n].key) break; /* * Get buffer_head for parent block, zero it out and set * the pointer to new one, then send parent to disk. 
*/ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); bh = sb_getblk(inode->i_sb, parent); if (!bh) { sysv_free_block(inode->i_sb, branch[n].key); break; } lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].bh = bh; branch[n].p = (sysv_zone_t*) bh->b_data + offsets[n]; *branch[n].p = branch[n].key; set_buffer_uptodate(bh); unlock_buffer(bh); dirty_indirect(bh, inode); } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) bforget(branch[i].bh); for (i = 0; i < n; i++) sysv_free_block(inode->i_sb, branch[i].key); return -ENOSPC; } static inline int splice_branch(struct inode *inode, Indirect chain[], Indirect *where, int num) { int i; /* Verify that place we are splicing to is still there and vacant */ write_lock(&pointers_lock); if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); inode_set_ctime_current(inode); /* had we spliced it onto indirect block? */ if (where->bh) dirty_indirect(where->bh, inode); if (IS_SYNC(inode)) sysv_sync_inode(inode); else mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) sysv_free_block(inode->i_sb, where[i].key); return -EAGAIN; } static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; struct super_block *sb = inode->i_sb; Indirect *partial; int left; int depth = block_to_path(inode, iblock, offsets); if (depth == 0) goto out; reread: partial = get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh_result, sb, block_to_cpu(SYSV_SB(sb), chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. */ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh_result); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[], Indirect chain[], sysv_zone_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = get_branch(inode, k, offsets, chain, &err); write_lock(&pointers_lock); if (!partial) partial = chain + k-1; /* * If the branch acquired continuation since we've looked at it - * fine, it should all survive and (new) top doesn't belong to us. */ if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial; p>chain && all_zeroes((sysv_zone_t*)p->bh->b_data,p->p); p--) ; /* * OK, we've found the last block that must survive. The rest of our * branch should be detached before unlocking. 
However, if that rest * of branch is all ours and does not grow immediately from the inode * it's easier to cheat and just decrement partial->p. */ if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while (partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q) { for ( ; p < q ; p++) { sysv_zone_t nr = *p; if (nr) { *p = 0; sysv_free_block(inode->i_sb, nr); mark_inode_dirty(inode); } } } static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth) { struct buffer_head * bh; struct super_block *sb = inode->i_sb; if (depth--) { for ( ; p < q ; p++) { int block; sysv_zone_t nr = *p; if (!nr) continue; *p = 0; block = block_to_cpu(SYSV_SB(sb), nr); bh = sb_bread(sb, block); if (!bh) continue; free_branches(inode, (sysv_zone_t*)bh->b_data, block_end(bh), depth); bforget(bh); sysv_free_block(sb, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } void sysv_truncate (struct inode * inode) { sysv_zone_t *i_data = SYSV_I(inode)->i_data; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; sysv_zone_t nr = 0; int n; long iblock; unsigned blocksize; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return; blocksize = inode->i_sb->s_blocksize; iblock = (inode->i_size + blocksize-1) >> inode->i_sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (n == 0) return; if (n == 1) { free_data(inode, i_data+offsets[0], i_data + DIRECT); goto do_indirects; } partial = find_shared(inode, n, offsets, chain, &nr); /* Kill the top of shared branch (already detached) */ if (nr) { if (partial == chain) mark_inode_dirty(inode); else dirty_indirect(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); dirty_indirect(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees (== subtrees deeper than...) 
*/ while (n < DEPTH) { nr = i_data[DIRECT + n - 1]; if (nr) { i_data[DIRECT + n - 1] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, n); } n++; } inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); if (IS_SYNC(inode)) sysv_sync_inode (inode); else mark_inode_dirty(inode); } static unsigned sysv_nblocks(struct super_block *s, loff_t size) { struct sysv_sb_info *sbi = SYSV_SB(s); int ptrs_bits = sbi->s_ind_per_block_bits; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + s->s_blocksize - 1) >> s->s_blocksize_bits; res = blocks; while (--i && blocks > direct) { blocks = ((blocks - direct - 1) >> ptrs_bits) + 1; res += blocks; direct = 1; } return res; } int sysv_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct super_block *s = path->dentry->d_sb; generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(path->dentry), stat); stat->blocks = (s->s_blocksize / 512) * sysv_nblocks(s, stat->size); stat->blksize = s->s_blocksize; return 0; } static int sysv_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, get_block); } static int sysv_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, get_block); } int sysv_prepare_chunk(struct folio *folio, loff_t pos, unsigned len) { return __block_write_begin(folio, pos, len, get_block); } static void sysv_write_failed(struct address_space *mapping, loff_t to) { struct inode *inode = mapping->host; if (to > inode->i_size) { truncate_pagecache(inode, inode->i_size); sysv_truncate(inode); } } static int sysv_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { int ret; ret = block_write_begin(mapping, pos, len, foliop, get_block); if (unlikely(ret)) sysv_write_failed(mapping, pos + len); return ret; } static sector_t sysv_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,get_block); } const struct address_space_operations sysv_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, .read_folio = sysv_read_folio, .writepages = sysv_writepages, .write_begin = sysv_write_begin, .write_end = generic_write_end, .migrate_folio = buffer_migrate_folio, .bmap = sysv_bmap };
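block_to_path() above turns a logical block number into a chain of at most DEPTH array offsets: ten direct slots, then single, double and triple indirect trees whose fan-out is s_ind_per_block. The standalone sketch below reproduces the same arithmetic with an assumed fan-out of 128 pointers per block (ptrs_bits = 7); the real values depend on the filesystem's block and pointer sizes.

/* Worked example of the sysv block_to_path() offset arithmetic. */
#include <stdio.h>

enum { DIRECT = 10, DEPTH = 4 };

#define PTRS_BITS 7                                  /* assumed geometry */
#define PTRS_PER_BLOCK (1 << PTRS_BITS)              /* 128 */
#define DOUBLE_BLOCKS (1UL << (PTRS_BITS * 2))       /* 16384 */

static int block_to_path(long block, int offsets[DEPTH])
{
	int n = 0;

	if (block < 0) {
		return 0;
	} else if (block < DIRECT) {
		offsets[n++] = block;
	} else if ((block -= DIRECT) < PTRS_PER_BLOCK) {
		offsets[n++] = DIRECT;
		offsets[n++] = block;
	} else if ((block -= PTRS_PER_BLOCK) < DOUBLE_BLOCKS) {
		offsets[n++] = DIRECT + 1;
		offsets[n++] = block >> PTRS_BITS;
		offsets[n++] = block & (PTRS_PER_BLOCK - 1);
	} else if (((block -= DOUBLE_BLOCKS) >> (PTRS_BITS * 2)) < PTRS_PER_BLOCK) {
		offsets[n++] = DIRECT + 2;
		offsets[n++] = block >> (PTRS_BITS * 2);
		offsets[n++] = (block >> PTRS_BITS) & (PTRS_PER_BLOCK - 1);
		offsets[n++] = block & (PTRS_PER_BLOCK - 1);
	}
	return n;
}

int main(void)
{
	int offsets[DEPTH];
	long samples[] = { 3, 12, 200, 20000 };

	for (int i = 0; i < 4; i++) {
		int n = block_to_path(samples[i], offsets);
		printf("block %5ld -> depth %d:", samples[i], n);
		for (int k = 0; k < n; k++)
			printf(" %d", offsets[k]);
		printf("\n");
	}
	return 0;
}

With these assumptions, block 3 stays direct (depth 1), block 12 goes through the single indirect block (offsets 10, 2), and block 20000 needs the triple indirect tree (depth 4), mirroring how get_branch() then walks one buffer per level.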
// SPDX-License-Identifier: GPL-2.0 /* * file.c - part of debugfs, a tiny little debug file system * * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. * * debugfs is for people to use instead of /proc or /sys. * See Documentation/filesystems/ for more details.
*/ #include <linux/module.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/pagemap.h> #include <linux/debugfs.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/atomic.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/poll.h> #include <linux/security.h> #include "internal.h" struct poll_table_struct; static ssize_t default_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t default_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } const struct file_operations debugfs_noop_file_operations = { .read = default_read_file, .write = default_write_file, .open = simple_open, .llseek = noop_llseek, }; #define F_DENTRY(filp) ((filp)->f_path.dentry) const struct file_operations *debugfs_real_fops(const struct file *filp) { struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata; if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) { /* * Urgh, we've been called w/o a protecting * debugfs_file_get(). */ WARN_ON(1); return NULL; } return fsd->real_fops; } EXPORT_SYMBOL_GPL(debugfs_real_fops); /** * debugfs_file_get - mark the beginning of file data access * @dentry: the dentry object whose data is being accessed. * * Up to a matching call to debugfs_file_put(), any successive call * into the file removing functions debugfs_remove() and * debugfs_remove_recursive() will block. Since associated private * file data may only get freed after a successful return of any of * the removal functions, you may safely access it after a successful * call to debugfs_file_get() without worrying about lifetime issues. * * If -%EIO is returned, the file has already been removed and thus, * it is not safe to access any of its data. If, on the other hand, * it is allowed to access the file data, zero is returned. */ int debugfs_file_get(struct dentry *dentry) { struct debugfs_fsdata *fsd; void *d_fsd; /* * This could only happen if some debugfs user erroneously calls * debugfs_file_get() on a dentry that isn't even a file, let * them know about it. */ if (WARN_ON(!d_is_reg(dentry))) return -EINVAL; d_fsd = READ_ONCE(dentry->d_fsdata); if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) { fsd = d_fsd; } else { fsd = kmalloc(sizeof(*fsd), GFP_KERNEL); if (!fsd) return -ENOMEM; if ((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT) { fsd->real_fops = NULL; fsd->short_fops = (void *)((unsigned long)d_fsd & ~(DEBUGFS_FSDATA_IS_REAL_FOPS_BIT | DEBUGFS_FSDATA_IS_SHORT_FOPS_BIT)); } else { fsd->real_fops = (void *)((unsigned long)d_fsd & ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); fsd->short_fops = NULL; } refcount_set(&fsd->active_users, 1); init_completion(&fsd->active_users_drained); INIT_LIST_HEAD(&fsd->cancellations); mutex_init(&fsd->cancellations_mtx); if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) { mutex_destroy(&fsd->cancellations_mtx); kfree(fsd); fsd = READ_ONCE(dentry->d_fsdata); } } /* * In case of a successful cmpxchg() above, this check is * strictly necessary and must follow it, see the comment in * __debugfs_remove_file(). * OTOH, if the cmpxchg() hasn't been executed or wasn't * successful, this serves the purpose of not starving * removers. */ if (d_unlinked(dentry)) return -EIO; if (!refcount_inc_not_zero(&fsd->active_users)) return -EIO; return 0; } EXPORT_SYMBOL_GPL(debugfs_file_get); /** * debugfs_file_put - mark the end of file data access * @dentry: the dentry object formerly passed to * debugfs_file_get(). 
* * Allow any ongoing concurrent call into debugfs_remove() or * debugfs_remove_recursive() blocked by a former call to * debugfs_file_get() to proceed and return to its caller. */ void debugfs_file_put(struct dentry *dentry) { struct debugfs_fsdata *fsd = READ_ONCE(dentry->d_fsdata); if (refcount_dec_and_test(&fsd->active_users)) complete(&fsd->active_users_drained); } EXPORT_SYMBOL_GPL(debugfs_file_put); /** * debugfs_enter_cancellation - enter a debugfs cancellation * @file: the file being accessed * @cancellation: the cancellation object, the cancel callback * inside of it must be initialized * * When a debugfs file is removed it needs to wait for all active * operations to complete. However, the operation itself may need * to wait for hardware or completion of some asynchronous process * or similar. As such, it may need to be cancelled to avoid long * waits or even deadlocks. * * This function can be used inside a debugfs handler that may * need to be cancelled. As soon as this function is called, the * cancellation's 'cancel' callback may be called, at which point * the caller should proceed to call debugfs_leave_cancellation() * and leave the debugfs handler function as soon as possible. * Note that the 'cancel' callback is only ever called in the * context of some kind of debugfs_remove(). * * This function must be paired with debugfs_leave_cancellation(). */ void debugfs_enter_cancellation(struct file *file, struct debugfs_cancellation *cancellation) { struct debugfs_fsdata *fsd; struct dentry *dentry = F_DENTRY(file); INIT_LIST_HEAD(&cancellation->list); if (WARN_ON(!d_is_reg(dentry))) return; if (WARN_ON(!cancellation->cancel)) return; fsd = READ_ONCE(dentry->d_fsdata); if (WARN_ON(!fsd || ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) return; mutex_lock(&fsd->cancellations_mtx); list_add(&cancellation->list, &fsd->cancellations); mutex_unlock(&fsd->cancellations_mtx); /* if we're already removing wake it up to cancel */ if (d_unlinked(dentry)) complete(&fsd->active_users_drained); } EXPORT_SYMBOL_GPL(debugfs_enter_cancellation); /** * debugfs_leave_cancellation - leave cancellation section * @file: the file being accessed * @cancellation: the cancellation previously registered with * debugfs_enter_cancellation() * * See the documentation of debugfs_enter_cancellation(). */ void debugfs_leave_cancellation(struct file *file, struct debugfs_cancellation *cancellation) { struct debugfs_fsdata *fsd; struct dentry *dentry = F_DENTRY(file); if (WARN_ON(!d_is_reg(dentry))) return; fsd = READ_ONCE(dentry->d_fsdata); if (WARN_ON(!fsd || ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) return; mutex_lock(&fsd->cancellations_mtx); if (!list_empty(&cancellation->list)) list_del(&cancellation->list); mutex_unlock(&fsd->cancellations_mtx); } EXPORT_SYMBOL_GPL(debugfs_leave_cancellation); /* * Only permit access to world-readable files when the kernel is locked down. * We also need to exclude any file that has ways to write or alter it as root * can bypass the permissions check. 
*/ static int debugfs_locked_down(struct inode *inode, struct file *filp, const struct file_operations *real_fops) { if ((inode->i_mode & 07777 & ~0444) == 0 && !(filp->f_mode & FMODE_WRITE) && (!real_fops || (!real_fops->unlocked_ioctl && !real_fops->compat_ioctl && !real_fops->mmap))) return 0; if (security_locked_down(LOCKDOWN_DEBUGFS)) return -EPERM; return 0; } static int open_proxy_open(struct inode *inode, struct file *filp) { struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops = NULL; int r; r = debugfs_file_get(dentry); if (r) return r == -EIO ? -ENOENT : r; real_fops = debugfs_real_fops(filp); r = debugfs_locked_down(inode, filp, real_fops); if (r) goto out; if (!fops_get(real_fops)) { #ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) { r = -ENXIO; goto out; } #endif /* Huh? Module did not clean up after itself at exit? */ WARN(1, "debugfs file owner did not clean up at exit: %pd", dentry); r = -ENXIO; goto out; } replace_fops(filp, real_fops); if (real_fops->open) r = real_fops->open(inode, filp); out: debugfs_file_put(dentry); return r; } const struct file_operations debugfs_open_proxy_file_operations = { .open = open_proxy_open, }; #define PROTO(args...) args #define ARGS(args...) args #define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \ static ret_type full_proxy_ ## name(proto) \ { \ struct dentry *dentry = F_DENTRY(filp); \ const struct file_operations *real_fops; \ ret_type r; \ \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ real_fops = debugfs_real_fops(filp); \ r = real_fops->name(args); \ debugfs_file_put(dentry); \ return r; \ } #define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args) \ static ret_type full_proxy_ ## name(proto) \ { \ struct dentry *dentry = F_DENTRY(filp); \ struct debugfs_fsdata *fsd; \ ret_type r; \ \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ fsd = dentry->d_fsdata; \ if (fsd->real_fops) \ r = fsd->real_fops->name(args); \ else \ r = fsd->short_fops->name(args); \ debugfs_file_put(dentry); \ return r; \ } FULL_PROXY_FUNC_BOTH(llseek, loff_t, filp, PROTO(struct file *filp, loff_t offset, int whence), ARGS(filp, offset, whence)); FULL_PROXY_FUNC_BOTH(read, ssize_t, filp, PROTO(struct file *filp, char __user *buf, size_t size, loff_t *ppos), ARGS(filp, buf, size, ppos)); FULL_PROXY_FUNC_BOTH(write, ssize_t, filp, PROTO(struct file *filp, const char __user *buf, size_t size, loff_t *ppos), ARGS(filp, buf, size, ppos)); FULL_PROXY_FUNC(unlocked_ioctl, long, filp, PROTO(struct file *filp, unsigned int cmd, unsigned long arg), ARGS(filp, cmd, arg)); static __poll_t full_proxy_poll(struct file *filp, struct poll_table_struct *wait) { struct dentry *dentry = F_DENTRY(filp); __poll_t r = 0; const struct file_operations *real_fops; if (debugfs_file_get(dentry)) return EPOLLHUP; real_fops = debugfs_real_fops(filp); r = real_fops->poll(filp, wait); debugfs_file_put(dentry); return r; } static int full_proxy_release(struct inode *inode, struct file *filp) { const struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops = debugfs_real_fops(filp); const struct file_operations *proxy_fops = filp->f_op; int r = 0; /* * We must not protect this against removal races here: the * original releaser should be called unconditionally in order * not to leak any resources. Releasers must not assume that * ->i_private is still being meaningful here. 
*/ if (real_fops && real_fops->release) r = real_fops->release(inode, filp); replace_fops(filp, d_inode(dentry)->i_fop); kfree(proxy_fops); fops_put(real_fops); return r; } static void __full_proxy_fops_init(struct file_operations *proxy_fops, struct debugfs_fsdata *fsd) { proxy_fops->release = full_proxy_release; if ((fsd->real_fops && fsd->real_fops->llseek) || (fsd->short_fops && fsd->short_fops->llseek)) proxy_fops->llseek = full_proxy_llseek; if ((fsd->real_fops && fsd->real_fops->read) || (fsd->short_fops && fsd->short_fops->read)) proxy_fops->read = full_proxy_read; if ((fsd->real_fops && fsd->real_fops->write) || (fsd->short_fops && fsd->short_fops->write)) proxy_fops->write = full_proxy_write; if (fsd->real_fops && fsd->real_fops->poll) proxy_fops->poll = full_proxy_poll; if (fsd->real_fops && fsd->real_fops->unlocked_ioctl) proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl; } static int full_proxy_open(struct inode *inode, struct file *filp) { struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops; struct file_operations *proxy_fops = NULL; struct debugfs_fsdata *fsd; int r; r = debugfs_file_get(dentry); if (r) return r == -EIO ? -ENOENT : r; fsd = dentry->d_fsdata; real_fops = fsd->real_fops; r = debugfs_locked_down(inode, filp, real_fops); if (r) goto out; if (real_fops && !fops_get(real_fops)) { #ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) { r = -ENXIO; goto out; } #endif /* Huh? Module did not cleanup after itself at exit? */ WARN(1, "debugfs file owner did not clean up at exit: %pd", dentry); r = -ENXIO; goto out; } proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL); if (!proxy_fops) { r = -ENOMEM; goto free_proxy; } __full_proxy_fops_init(proxy_fops, fsd); replace_fops(filp, proxy_fops); if (!real_fops || real_fops->open) { if (real_fops) r = real_fops->open(inode, filp); else r = simple_open(inode, filp); if (r) { replace_fops(filp, d_inode(dentry)->i_fop); goto free_proxy; } else if (filp->f_op != proxy_fops) { /* No protection against file removal anymore. 
static int full_proxy_open(struct inode *inode, struct file *filp)
{
	struct dentry *dentry = F_DENTRY(filp);
	const struct file_operations *real_fops;
	struct file_operations *proxy_fops = NULL;
	struct debugfs_fsdata *fsd;
	int r;

	r = debugfs_file_get(dentry);
	if (r)
		return r == -EIO ? -ENOENT : r;

	fsd = dentry->d_fsdata;
	real_fops = fsd->real_fops;
	r = debugfs_locked_down(inode, filp, real_fops);
	if (r)
		goto out;

	if (real_fops && !fops_get(real_fops)) {
#ifdef CONFIG_MODULES
		if (real_fops->owner &&
		    real_fops->owner->state == MODULE_STATE_GOING) {
			r = -ENXIO;
			goto out;
		}
#endif

		/* Huh? Module did not clean up after itself at exit? */
		WARN(1, "debugfs file owner did not clean up at exit: %pd",
		     dentry);
		r = -ENXIO;
		goto out;
	}

	proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL);
	if (!proxy_fops) {
		r = -ENOMEM;
		goto free_proxy;
	}
	__full_proxy_fops_init(proxy_fops, fsd);
	replace_fops(filp, proxy_fops);

	if (!real_fops || real_fops->open) {
		if (real_fops)
			r = real_fops->open(inode, filp);
		else
			r = simple_open(inode, filp);
		if (r) {
			replace_fops(filp, d_inode(dentry)->i_fop);
			goto free_proxy;
		} else if (filp->f_op != proxy_fops) {
			/* No protection against file removal anymore. */
			WARN(1, "debugfs file owner replaced proxy fops: %pd",
			     dentry);
			goto free_proxy;
		}
	}

	goto out;
free_proxy:
	kfree(proxy_fops);
	fops_put(real_fops);
out:
	debugfs_file_put(dentry);
	return r;
}

const struct file_operations debugfs_full_proxy_file_operations = {
	.open = full_proxy_open,
};

ssize_t debugfs_attr_read(struct file *file, char __user *buf,
			  size_t len, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	ssize_t ret;

	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;
	ret = simple_attr_read(file, buf, len, ppos);
	debugfs_file_put(dentry);
	return ret;
}
EXPORT_SYMBOL_GPL(debugfs_attr_read);

static ssize_t debugfs_attr_write_xsigned(struct file *file,
					  const char __user *buf,
					  size_t len, loff_t *ppos,
					  bool is_signed)
{
	struct dentry *dentry = F_DENTRY(file);
	ssize_t ret;

	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;
	if (is_signed)
		ret = simple_attr_write_signed(file, buf, len, ppos);
	else
		ret = simple_attr_write(file, buf, len, ppos);
	debugfs_file_put(dentry);
	return ret;
}

ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
			   size_t len, loff_t *ppos)
{
	return debugfs_attr_write_xsigned(file, buf, len, ppos, false);
}
EXPORT_SYMBOL_GPL(debugfs_attr_write);

ssize_t debugfs_attr_write_signed(struct file *file,
				  const char __user *buf,
				  size_t len, loff_t *ppos)
{
	return debugfs_attr_write_xsigned(file, buf, len, ppos, true);
}
EXPORT_SYMBOL_GPL(debugfs_attr_write_signed);

static struct dentry *debugfs_create_mode_unsafe(const char *name,
					umode_t mode, struct dentry *parent,
					void *value,
					const struct file_operations *fops,
					const struct file_operations *fops_ro,
					const struct file_operations *fops_wo)
{
	/* if there are no write bits set, make read only */
	if (!(mode & S_IWUGO))
		return debugfs_create_file_unsafe(name, mode, parent, value,
						  fops_ro);
	/* if there are no read bits set, make write only */
	if (!(mode & S_IRUGO))
		return debugfs_create_file_unsafe(name, mode, parent, value,
						  fops_wo);

	return debugfs_create_file_unsafe(name, mode, parent, value, fops);
}

static int debugfs_u8_set(void *data, u64 val)
{
	*(u8 *)data = val;
	return 0;
}
static int debugfs_u8_get(void *data, u64 *val)
{
	*val = *(u8 *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n");
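/*
 * Illustrative sketch (hypothetical, not part of this file): the same
 * DEFINE_DEBUGFS_ATTRIBUTE() pattern used above for fops_u8 also
 * works for driver-defined accessors, e.g. a value that needs
 * conversion on read and write. All names below are made up:
 */
#if 0
static int example_mhz_get(void *data, u64 *val)
{
	/* Stored in Hz, reported in MHz. */
	*val = *(unsigned long *)data / 1000000;
	return 0;
}
static int example_mhz_set(void *data, u64 val)
{
	*(unsigned long *)data = val * 1000000;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(example_mhz_fops, example_mhz_get,
			 example_mhz_set, "%llu\n");
/* A driver would create the file with debugfs_create_file_unsafe(). */
#endif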
/**
 * debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
		       u8 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8,
				   &fops_u8_ro, &fops_u8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u8);

static int debugfs_u16_set(void *data, u64 val)
{
	*(u16 *)data = val;
	return 0;
}
static int debugfs_u16_get(void *data, u64 *val)
{
	*val = *(u16 *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n");

/**
 * debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
			u16 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16,
				   &fops_u16_ro, &fops_u16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u16);

static int debugfs_u32_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}
static int debugfs_u32_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n");

/**
 * debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
			u32 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u32,
				   &fops_u32_ro, &fops_u32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u32);

static int debugfs_u64_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
static int debugfs_u64_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
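/*
 * Illustrative usage sketch (hypothetical driver code, not part of
 * this file): exposing plain counters with the
 * debugfs_create_u{8,16,32,64} helpers in this block. All names and
 * the "example" directory are made up.
 */
#if 0
static u8 example_retries;
static u32 example_packets;

static void example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	/* Read-write file, shown as a decimal number. */
	debugfs_create_u8("retries", 0644, dir, &example_retries);
	/* No write bits in the mode, so the read-only fops are chosen. */
	debugfs_create_u32("packets", 0444, dir, &example_packets);
}
#endif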
/**
 * debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
			u64 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64,
				   &fops_u64_ro, &fops_u64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_u64);

static int debugfs_ulong_set(void *data, u64 val)
{
	*(unsigned long *)data = val;
	return 0;
}
static int debugfs_ulong_get(void *data, u64 *val)
{
	*val = *(unsigned long *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong, debugfs_ulong_get, debugfs_ulong_set,
			 "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_ro, debugfs_ulong_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n");

/**
 * debugfs_create_ulong - create a debugfs file that is used to read and write
 * an unsigned long value.
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_ulong(const char *name, umode_t mode,
			  struct dentry *parent, unsigned long *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_ulong,
				   &fops_ulong_ro, &fops_ulong_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_ulong);

DEFINE_DEBUGFS_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set,
			 "0x%02llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set,
			 "0x%04llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set,
			 "0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n");

DEFINE_DEBUGFS_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set,
			 "0x%016llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_ro, debugfs_u64_get, NULL, "0x%016llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n");

/*
 * debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value
 *
 * These functions are exactly the same as the above functions (but use a hex
 * output for the decimal challenged). For details look at the above unsigned
 * decimal functions.
 */
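/*
 * Illustrative sketch (hypothetical, not part of this file): the hex
 * variants documented next read back as fixed-width hexadecimal,
 * which suits register-style values. "example_ctrl_reg" is made up.
 */
#if 0
static u32 example_ctrl_reg;

static void example_hex_init(struct dentry *dir)
{
	/* Reads as e.g. "0x0000beef\n" because of the "0x%08llx\n" format. */
	debugfs_create_x32("ctrl", 0644, dir, &example_ctrl_reg);
}
#endif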
/**
 * debugfs_create_x8 - create a debugfs file that is used to read and write an unsigned 8-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
		       u8 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8,
				   &fops_x8_ro, &fops_x8_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x8);

/**
 * debugfs_create_x16 - create a debugfs file that is used to read and write an unsigned 16-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
			u16 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16,
				   &fops_x16_ro, &fops_x16_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x16);

/**
 * debugfs_create_x32 - create a debugfs file that is used to read and write an unsigned 32-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
			u32 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32,
				   &fops_x32_ro, &fops_x32_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x32);

/**
 * debugfs_create_x64 - create a debugfs file that is used to read and write an unsigned 64-bit value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
			u64 *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64,
				   &fops_x64_ro, &fops_x64_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_x64);

static int debugfs_size_t_set(void *data, u64 val)
{
	*(size_t *)data = val;
	return 0;
}
static int debugfs_size_t_get(void *data, u64 *val)
{
	*val = *(size_t *)data;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set,
			 "%llu\n"); /* %llu and %zu are more or less the same */
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_ro, debugfs_size_t_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n");
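/*
 * Illustrative sketch (hypothetical userspace code, not part of this
 * file): all of the numeric files above are consumed with ordinary
 * read()/write() calls once debugfs is mounted, conventionally at
 * /sys/kernel/debug. The path below is made up.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/kernel/debug/example/retries", O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "3\n" */
	if (n > 0) {
		buf[n] = '\0';
		printf("retries: %s", buf);
	}
	(void)write(fd, "7\n", 2);	/* parsed by the attribute's setter */
	close(fd);
	return 0;
}
#endif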
/**
 * debugfs_create_size_t - create a debugfs file that is used to read and write a size_t value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_size_t(const char *name, umode_t mode,
			   struct dentry *parent, size_t *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_size_t,
				   &fops_size_t_ro, &fops_size_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_size_t);

static int debugfs_atomic_t_set(void *data, u64 val)
{
	atomic_set((atomic_t *)data, val);
	return 0;
}
static int debugfs_atomic_t_get(void *data, u64 *val)
{
	*val = atomic_read((atomic_t *)data);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t, debugfs_atomic_t_get,
				debugfs_atomic_t_set, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_ro, debugfs_atomic_t_get, NULL,
				"%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(fops_atomic_t_wo, NULL, debugfs_atomic_t_set,
				"%lld\n");

/**
 * debugfs_create_atomic_t - create a debugfs file that is used to read and
 * write an atomic_t value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 */
void debugfs_create_atomic_t(const char *name, umode_t mode,
			     struct dentry *parent, atomic_t *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_atomic_t,
				   &fops_atomic_t_ro, &fops_atomic_t_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_atomic_t);

ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	char buf[2];
	bool val;
	int r;
	struct dentry *dentry = F_DENTRY(file);

	r = debugfs_file_get(dentry);
	if (unlikely(r))
		return r;
	val = *(bool *)file->private_data;
	debugfs_file_put(dentry);

	if (val)
		buf[0] = 'Y';
	else
		buf[0] = 'N';
	buf[1] = '\n';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
EXPORT_SYMBOL_GPL(debugfs_read_file_bool);

ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	bool bv;
	int r;
	bool *val = file->private_data;
	struct dentry *dentry = F_DENTRY(file);

	r = kstrtobool_from_user(user_buf, count, &bv);
	if (!r) {
		r = debugfs_file_get(dentry);
		if (unlikely(r))
			return r;
		*val = bv;
		debugfs_file_put(dentry);
	}

	return count;
}
EXPORT_SYMBOL_GPL(debugfs_write_file_bool);

static const struct file_operations fops_bool = {
	.read =		debugfs_read_file_bool,
	.write =	debugfs_write_file_bool,
	.open =		simple_open,
	.llseek =	default_llseek,
};

static const struct file_operations fops_bool_ro = {
	.read =		debugfs_read_file_bool,
	.open =		simple_open,
	.llseek =	default_llseek,
};

static const struct file_operations fops_bool_wo = {
	.write =	debugfs_write_file_bool,
	.open =		simple_open,
	.llseek =	default_llseek,
};
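/*
 * Illustrative sketch (hypothetical, not part of this file): bool
 * files accept the usual kstrtobool() spellings ("Y"/"N", "1"/"0",
 * "on"/"off", ...) and read back as "Y\n" or "N\n". Names are made up.
 */
#if 0
static bool example_enabled = true;
static atomic_t example_refs = ATOMIC_INIT(0);

static void example_flags_init(struct dentry *dir)
{
	debugfs_create_bool("enabled", 0644, dir, &example_enabled);
	/* Signed formatting ("%lld\n"), matching the _SIGNED attributes. */
	debugfs_create_atomic_t("refs", 0444, dir, &example_refs);
}
#endif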
/**
 * debugfs_create_bool - create a debugfs file that is used to read and write a boolean value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
			 bool *value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_bool,
				   &fops_bool_ro, &fops_bool_wo);
}
EXPORT_SYMBOL_GPL(debugfs_create_bool);

ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	char *str, *copy = NULL;
	int copy_len, len;
	ssize_t ret;

	ret = debugfs_file_get(dentry);
	if (unlikely(ret))
		return ret;

	str = *(char **)file->private_data;
	len = strlen(str) + 1;
	copy = kmalloc(len, GFP_KERNEL);
	if (!copy) {
		debugfs_file_put(dentry);
		return -ENOMEM;
	}

	copy_len = strscpy(copy, str, len);
	debugfs_file_put(dentry);
	if (copy_len < 0) {
		kfree(copy);
		return copy_len;
	}

	copy[copy_len] = '\n';

	ret = simple_read_from_buffer(user_buf, count, ppos, copy, len);
	kfree(copy);

	return ret;
}
EXPORT_SYMBOL_GPL(debugfs_create_str);

static ssize_t debugfs_write_file_str(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct dentry *dentry = F_DENTRY(file);
	char *old, *new = NULL;
	int pos = *ppos;
	int r;

	r = debugfs_file_get(dentry);
	if (unlikely(r))
		return r;

	old = *(char **)file->private_data;

	/* only allow strict concatenation */
	r = -EINVAL;
	if (pos && pos != strlen(old))
		goto error;

	r = -E2BIG;
	if (pos + count + 1 > PAGE_SIZE)
		goto error;

	r = -ENOMEM;
	new = kmalloc(pos + count + 1, GFP_KERNEL);
	if (!new)
		goto error;

	if (pos)
		memcpy(new, old, pos);

	r = -EFAULT;
	if (copy_from_user(new + pos, user_buf, count))
		goto error;

	new[pos + count] = '\0';
	strim(new);

	rcu_assign_pointer(*(char __rcu **)file->private_data, new);
	synchronize_rcu();
	kfree(old);

	debugfs_file_put(dentry);
	return count;

error:
	kfree(new);
	debugfs_file_put(dentry);
	return r;
}

static const struct file_operations fops_str = {
	.read =		debugfs_read_file_str,
	.write =	debugfs_write_file_str,
	.open =		simple_open,
	.llseek =	default_llseek,
};

static const struct file_operations fops_str_ro = {
	.read =		debugfs_read_file_str,
	.open =		simple_open,
	.llseek =	default_llseek,
};

static const struct file_operations fops_str_wo = {
	.write =	debugfs_write_file_str,
	.open =		simple_open,
	.llseek =	default_llseek,
};
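/*
 * Illustrative sketch (hypothetical, not part of this file): string
 * files hold a kmalloc'ed, NUL-terminated string that writers replace
 * via RCU, and blob files export an arbitrary memory region. All
 * names below are made up.
 */
#if 0
static char *example_label;	/* must point to kmalloc'ed memory */
static u8 example_fw_dump[256];
static struct debugfs_blob_wrapper example_blob = {
	.data = example_fw_dump,
	.size = sizeof(example_fw_dump),
};

static void example_misc_init(struct dentry *dir)
{
	example_label = kstrdup("idle", GFP_KERNEL);
	debugfs_create_str("label", 0644, dir, &example_label);
	/* debugfs_create_blob() masks the mode with 0644, so 0444 holds. */
	debugfs_create_blob("fw_dump", 0444, dir, &example_blob);
}
#endif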
/**
 * debugfs_create_str - create a debugfs file that is used to read and write a string value
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @value: a pointer to the variable that the file should read to and write
 *         from.
 *
 * This function creates a file in debugfs with the given name that
 * contains the value of the variable @value. If the @mode variable is so
 * set, it can be read from, and written to.
 */
void debugfs_create_str(const char *name, umode_t mode,
			struct dentry *parent, char **value)
{
	debugfs_create_mode_unsafe(name, mode, parent, value, &fops_str,
				   &fops_str_ro, &fops_str_wo);
}

static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	struct dentry *dentry = F_DENTRY(file);
	ssize_t r;

	r = debugfs_file_get(dentry);
	if (unlikely(r))
		return r;
	r = simple_read_from_buffer(user_buf, count, ppos, blob->data,
				    blob->size);
	debugfs_file_put(dentry);
	return r;
}

static ssize_t write_file_blob(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	struct dentry *dentry = F_DENTRY(file);
	ssize_t r;

	r = debugfs_file_get(dentry);
	if (unlikely(r))
		return r;
	r = simple_write_to_buffer(blob->data, blob->size, ppos, user_buf,
				   count);
	debugfs_file_put(dentry);
	return r;
}

static const struct file_operations fops_blob = {
	.read =		read_file_blob,
	.write =	write_file_blob,
	.open =		simple_open,
	.llseek =	default_llseek,
};

/**
 * debugfs_create_blob - create a debugfs file that is used to read and write
 * a binary blob
 * @name: a pointer to a string containing the name of the file to create.
 * @mode: the permission that the file should have
 * @parent: a pointer to the parent dentry for this file. This should be a
 *          directory dentry if set. If this parameter is %NULL, then the
 *          file will be created in the root of the debugfs filesystem.
 * @blob: a pointer to a struct debugfs_blob_wrapper which contains a pointer
 *        to the blob data and the size of the data.
 *
 * This function creates a file in debugfs with the given name that exports
 * @blob->data as a binary blob. If the @mode variable is so set it can be
 * read from and written to.
 *
 * This function will return a pointer to a dentry if it succeeds. This
 * pointer must be passed to the debugfs_remove() function when the file is
 * to be removed (no automatic cleanup happens if your module is unloaded,
 * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
 * returned.
 *
 * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
 * be returned.
 */
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
				   struct dentry *parent,
				   struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file_unsafe(name, mode & 0644, parent, blob,
					  &fops_blob);
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);

static size_t u32_format_array(char *buf, size_t bufsize,
			       u32 *array, int array_size)
{
	size_t ret = 0;

	while (--array_size >= 0) {
		size_t len;
		char term = array_size ? ' ' : '\n';

		len = snprintf(buf, bufsize, "%u%c", *array++, term);
		ret += len;

		buf += len;
		bufsize -= len;
	}
	return ret;
}

static int u32_array_open(struct inode *inode, struct file *file)
{
	struct debugfs_u32_array *data = inode->i_private;
	int size, elements = data->n_elements;
	char *buf;

	/*
	 * Max size:
	 *  - 10 digits + ' '/'\n' = 11 bytes per number
	 *  - terminating NUL character
	 */
	size = elements*11;
	buf = kmalloc(size+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf[size] = 0