/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Definitions for the mount interface. This describes the in-kernel
 * linked list of mounted filesystems.
 *
 * Author: Marco van Wieringen <mvw@planets.elm.net>
 */
#ifndef _LINUX_MOUNT_H
#define _LINUX_MOUNT_H

#include <linux/types.h>
#include <asm/barrier.h>

struct super_block;
struct dentry;
struct user_namespace;
struct mnt_idmap;
struct file_system_type;
struct fs_context;
struct file;
struct path;

enum mount_flags {
	MNT_NOSUID	= 0x01,
	MNT_NODEV	= 0x02,
	MNT_NOEXEC	= 0x04,
	MNT_NOATIME	= 0x08,
	MNT_NODIRATIME	= 0x10,
	MNT_RELATIME	= 0x20,
	MNT_READONLY	= 0x40, /* does the user want this to be r/o? */
	MNT_NOSYMFOLLOW	= 0x80,

	MNT_SHRINKABLE	= 0x100,

	MNT_INTERNAL	= 0x4000,

	MNT_LOCK_ATIME		= 0x040000,
	MNT_LOCK_NOEXEC		= 0x080000,
	MNT_LOCK_NOSUID		= 0x100000,
	MNT_LOCK_NODEV		= 0x200000,
	MNT_LOCK_READONLY	= 0x400000,
	MNT_LOCKED		= 0x800000,
	MNT_DOOMED		= 0x1000000,
	MNT_SYNC_UMOUNT		= 0x2000000,
	MNT_UMOUNT		= 0x8000000,

	MNT_USER_SETTABLE_MASK	= MNT_NOSUID | MNT_NODEV | MNT_NOEXEC |
				  MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME |
				  MNT_READONLY | MNT_NOSYMFOLLOW,
	MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,

	MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT |
			     MNT_LOCKED
};

struct vfsmount {
	struct dentry *mnt_root;	/* root of the mounted tree */
	struct super_block *mnt_sb;	/* pointer to superblock */
	int mnt_flags;
	struct mnt_idmap *mnt_idmap;
} __randomize_layout;

static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
	/* Pairs with smp_store_release() in do_idmap_mount(). */
	return READ_ONCE(mnt->mnt_idmap);
}

extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
extern bool __mnt_is_readonly(const struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);

extern struct vfsmount *clone_private_mount(const struct path *path);
int mnt_get_write_access(struct vfsmount *mnt);
void mnt_put_write_access(struct vfsmount *mnt);

extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *fc_mount_longterm(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
				       int flags, const char *name,
				       void *data);

extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);

extern bool path_is_mountpoint(const struct path *path);

extern bool our_mnt(struct vfsmount *mnt);

extern struct vfsmount *kern_mount(struct file_system_type *);
extern void kern_unmount(struct vfsmount *mnt);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
int do_mount(const char *, const char __user *,
	     const char *, unsigned long, void *);
extern const struct path *collect_paths(const struct path *, struct path *, unsigned);
extern void drop_collected_paths(const struct path *, const struct path *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);

extern int cifs_root_data(char **dev, char **opts);

#endif /* _LINUX_MOUNT_H */
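A minimal usage sketch of the write-access helpers declared above, assuming ordinary kernel context: mnt_want_write() must be paired with mnt_drop_write() around any modification of a mounted filesystem, and fails on read-only mounts. The helper example_modify_object() below is hypothetical, not a kernel API.

/*
 * Sketch only: take and release write access on a vfsmount around a
 * modification of the object referenced by @dentry.
 */
#include <linux/mount.h>
#include <linux/dcache.h>

static int example_modify_object(struct vfsmount *mnt, struct dentry *dentry)
{
	int err;

	err = mnt_want_write(mnt);	/* fails (e.g. -EROFS) on r/o mounts */
	if (err)
		return err;

	/* ... modify the object referenced by @dentry here ... */

	mnt_drop_write(mnt);		/* always pairs with mnt_want_write() */
	return 0;
}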
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 */

#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_

#include "main.h"

#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/types.h>

/**
 * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop
 *  detect frame sent by bridge loop avoidance
 * @mac: mac address to check
 *
 * Return: true if it looks like a loop detect frame
 * (mac starts with BA:BE), false otherwise
 */
static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
{
	if (mac[0] == 0xba && mac[1] == 0xbe)
		return true;

	return false;
}

#ifdef CONFIG_BATMAN_ADV_BLA
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, int packet_type);
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid);
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node,
			       int hdr_size);
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb);
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb);
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid);
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb);
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif);
void batadv_bla_status_update(struct net_device *net_dev);
int batadv_bla_init(struct batadv_priv *bat_priv);
void batadv_bla_free(struct batadv_priv *bat_priv);
#ifdef CONFIG_BATMAN_ADV_DAT
bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
			    unsigned short vid);
#endif
#define BATADV_BLA_CRC_INIT	0
#else /* ifdef CONFIG_BATMAN_ADV_BLA */

static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
				 struct sk_buff *skb, unsigned short vid,
				 int packet_type)
{
	return false;
}

static inline bool batadv_bla_tx(struct batadv_priv *bat_priv,
				 struct sk_buff *skb, unsigned short vid)
{
	return false;
}

static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
					     struct batadv_orig_node *orig_node,
					     int hdr_size)
{
	return false;
}

static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
						  u8 *orig, unsigned short vid)
{
	return false;
}

static inline bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
						  struct sk_buff *skb)
{
	return false;
}

static inline void
batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_hard_iface *oldif)
{
}

static inline int batadv_bla_init(struct batadv_priv *bat_priv)
{
	return 1;
}

static inline void batadv_bla_free(struct batadv_priv *bat_priv)
{
}

static inline int batadv_bla_claim_dump(struct sk_buff *msg,
					struct netlink_callback *cb)
{
	return -EOPNOTSUPP;
}

static inline int batadv_bla_backbone_dump(struct sk_buff *msg,
					   struct netlink_callback *cb)
{
	return -EOPNOTSUPP;
}

static inline bool
batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
		       unsigned short vid)
{
	return true;
}

#endif /* ifdef CONFIG_BATMAN_ADV_BLA */

#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
// SPDX-License-Identifier: MIT

#include <linux/export.h>

#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>

#include "drm_client_internal.h"

static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
MODULE_PARM_DESC(active,
		 "Choose which drm client to start, default is "
		 CONFIG_DRM_CLIENT_DEFAULT);

/**
 * drm_client_setup() - Setup in-kernel DRM clients
 * @dev: DRM device
 * @format: Preferred pixel format for the device. Use NULL, unless
 *          there is clearly a driver-preferred format.
 *
 * This function sets up the in-kernel DRM clients. Restore, hotplug
 * events and teardown are all taken care of.
 *
 * Drivers should call drm_client_setup() after registering the new
 * DRM device with drm_dev_register(). This function is safe to call
 * even when there are no connectors present. Setup will be retried
 * on the next hotplug event.
 *
 * The clients are destroyed by drm_dev_unregister().
 */
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n");
		return;
	}

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (!strcmp(drm_client_default, "fbdev")) {
		int ret;

		ret = drm_fbdev_client_setup(dev, format);
		if (ret)
			drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
		return;
	}
#endif

#ifdef CONFIG_DRM_CLIENT_LOG
	if (!strcmp(drm_client_default, "log")) {
		drm_log_register(dev);
		return;
	}
#endif

	if (strcmp(drm_client_default, ""))
		drm_warn(dev, "Unknown DRM client %s\n", drm_client_default);
}
EXPORT_SYMBOL(drm_client_setup);

/**
 * drm_client_setup_with_fourcc() - Setup in-kernel DRM clients for color mode
 * @dev: DRM device
 * @fourcc: Preferred pixel format as 4CC code for the device
 *
 * This function sets up the in-kernel DRM clients. It is equivalent
 * to drm_client_setup(), but expects a 4CC code as second argument.
 */
void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
{
	drm_client_setup(dev, drm_format_info(fourcc));
}
EXPORT_SYMBOL(drm_client_setup_with_fourcc);

/**
 * drm_client_setup_with_color_mode() - Setup in-kernel DRM clients for color mode
 * @dev: DRM device
 * @color_mode: Preferred color mode for the device
 *
 * This function sets up the in-kernel DRM clients. It is equivalent
 * to drm_client_setup(), but expects a color mode as second argument.
 *
 * Do not use this function in new drivers. Prefer drm_client_setup() with a
 * format of NULL.
 */
void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode)
{
	u32 fourcc = drm_driver_color_mode_format(dev, color_mode);

	drm_client_setup_with_fourcc(dev, fourcc);
}
EXPORT_SYMBOL(drm_client_setup_with_color_mode);

MODULE_DESCRIPTION("In-kernel DRM clients");
MODULE_LICENSE("GPL and additional rights");
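A short sketch of the call order the kernel-doc above prescribes: register the device first, then hand it to drm_client_setup(), passing NULL unless the driver clearly prefers a specific format. The helper name example_register_and_setup() is hypothetical and not taken from any real driver.

/*
 * Sketch only: how a driver's probe path might follow the documented
 * drm_dev_register() -> drm_client_setup() order.
 */
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_drv.h>

static int example_register_and_setup(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* NULL: no driver-preferred format; safe even with no connectors yet */
	drm_client_setup(drm, NULL);
	return 0;
}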
// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 *  Linux VFS inode operations.
 */

#include <linux/blkdev.h>
#include <linux/fileattr.h>
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int orangefs_writepage_locked(struct folio *folio,
				     struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct orangefs_write_range *wr = NULL;
	struct iov_iter iter;
	struct bio_vec bv;
	size_t wlen;
	ssize_t ret;
	loff_t len, off;

	folio_start_writeback(folio);

	len = i_size_read(inode);
	if (folio->private) {
		wr = folio->private;
		off = wr->pos;
		if ((off + wr->len > len) && (off <= len))
			wlen = len - off;
		else
			wlen = wr->len;
		if (wlen == 0)
			wlen = wr->len;
	} else {
		WARN_ON(1);
		off = folio_pos(folio);
		wlen = folio_size(folio);

		if (wlen > len - off)
			wlen = len - off;
	}
	WARN_ON(wlen == 0);

	bvec_set_folio(&bv, folio, wlen, offset_in_folio(folio, off));
	iov_iter_bvec(&iter, ITER_SOURCE, &bv, 1, wlen);

	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,
				 len, wr, NULL, NULL);
	if (ret < 0) {
		mapping_set_error(folio->mapping, ret);
	} else {
		ret = 0;
	}
	kfree(folio_detach_private(folio));
	return ret;
}

struct orangefs_writepages {
	loff_t off;
	size_t len;
	kuid_t uid;
	kgid_t gid;
	int maxpages;
	int nfolios;
	struct address_space *mapping;
	struct folio **folios;
	struct bio_vec *bv;
};

static int orangefs_writepages_work(struct orangefs_writepages *ow,
				    struct writeback_control *wbc)
{
	struct inode *inode = ow->mapping->host;
	struct orangefs_write_range *wrp, wr;
	struct iov_iter iter;
	ssize_t ret;
	size_t start;
	loff_t len, off;
	int i;

	len = i_size_read(inode);

	start = offset_in_folio(ow->folios[0], ow->off);
	for (i = 0; i < ow->nfolios; i++) {
		folio_start_writeback(ow->folios[i]);
		bvec_set_folio(&ow->bv[i], ow->folios[i],
			       folio_size(ow->folios[i]) - start, start);
		start = 0;
	}
	iov_iter_bvec(&iter, ITER_SOURCE, ow->bv, ow->nfolios, ow->len);

	WARN_ON(ow->off >= len);
	if (ow->off + ow->len > len)
		ow->len = len - ow->off;

	off = ow->off;
	wr.uid = ow->uid;
	wr.gid = ow->gid;
	ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,
				 0, &wr, NULL, NULL);
	if (ret < 0)
		mapping_set_error(ow->mapping, ret);
	else
		ret = 0;

	for (i = 0; i < ow->nfolios; i++) {
		wrp =
folio_detach_private(ow->folios[i]); kfree(wrp); folio_end_writeback(ow->folios[i]); folio_unlock(ow->folios[i]); } return ret; } static int orangefs_writepages_callback(struct folio *folio, struct writeback_control *wbc, struct orangefs_writepages *ow) { struct orangefs_write_range *wr = folio->private; int ret; if (!wr) { folio_unlock(folio); /* It's not private so there's nothing to write, right? */ printk("writepages_callback not private!\n"); BUG(); return 0; } ret = -1; if (ow->nfolios == 0) { ow->off = wr->pos; ow->len = wr->len; ow->uid = wr->uid; ow->gid = wr->gid; ow->folios[ow->nfolios++] = folio; ret = 0; goto done; } if (!uid_eq(ow->uid, wr->uid) || !gid_eq(ow->gid, wr->gid)) { orangefs_writepages_work(ow, wbc); ow->nfolios = 0; ret = -1; goto done; } if (ow->off + ow->len == wr->pos) { ow->len += wr->len; ow->folios[ow->nfolios++] = folio; ret = 0; goto done; } done: if (ret == -1) { if (ow->nfolios) { orangefs_writepages_work(ow, wbc); ow->nfolios = 0; } ret = orangefs_writepage_locked(folio, wbc); mapping_set_error(folio->mapping, ret); folio_unlock(folio); folio_end_writeback(folio); } else { if (ow->nfolios == ow->maxpages) { orangefs_writepages_work(ow, wbc); ow->nfolios = 0; } } return ret; } static int orangefs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct orangefs_writepages *ow; struct blk_plug plug; int error; struct folio *folio = NULL; ow = kzalloc(sizeof(struct orangefs_writepages), GFP_KERNEL); if (!ow) return -ENOMEM; ow->maxpages = orangefs_bufmap_size_query()/PAGE_SIZE; ow->folios = kcalloc(ow->maxpages, sizeof(struct folio *), GFP_KERNEL); if (!ow->folios) { kfree(ow); return -ENOMEM; } ow->bv = kcalloc(ow->maxpages, sizeof(struct bio_vec), GFP_KERNEL); if (!ow->bv) { kfree(ow->folios); kfree(ow); return -ENOMEM; } ow->mapping = mapping; blk_start_plug(&plug); while ((folio = writeback_iter(mapping, wbc, folio, &error))) error = orangefs_writepages_callback(folio, wbc, ow); if (ow->nfolios) error = orangefs_writepages_work(ow, wbc); blk_finish_plug(&plug); kfree(ow->folios); kfree(ow->bv); kfree(ow); return error; } static int orangefs_launder_folio(struct folio *); static void orangefs_readahead(struct readahead_control *rac) { loff_t offset; struct iov_iter iter; struct inode *inode = rac->mapping->host; struct xarray *i_pages; struct folio *folio; loff_t new_start = readahead_pos(rac); int ret; size_t new_len = 0; loff_t bytes_remaining = inode->i_size - readahead_pos(rac); loff_t pages_remaining = bytes_remaining / PAGE_SIZE; if (pages_remaining >= 1024) new_len = 4194304; else if (pages_remaining > readahead_count(rac)) new_len = bytes_remaining; if (new_len) readahead_expand(rac, new_start, new_len); offset = readahead_pos(rac); i_pages = &rac->mapping->i_pages; iov_iter_xarray(&iter, ITER_DEST, i_pages, offset, readahead_length(rac)); /* read in the pages. */ if ((ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &offset, &iter, readahead_length(rac), inode->i_size, NULL, NULL, rac->file)) < 0) gossip_debug(GOSSIP_FILE_DEBUG, "%s: wait_for_direct_io failed. \n", __func__); else ret = 0; /* clean up. 
*/ while ((folio = readahead_folio(rac))) { if (!ret) folio_mark_uptodate(folio); folio_unlock(folio); } } static int orangefs_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; struct iov_iter iter; struct bio_vec bv; ssize_t ret; loff_t off; /* offset of this folio in the file */ if (folio_test_dirty(folio)) orangefs_launder_folio(folio); off = folio_pos(folio); bvec_set_folio(&bv, folio, folio_size(folio), 0); iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio)); ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter, folio_size(folio), inode->i_size, NULL, NULL, file); /* this will only zero remaining unread portions of the folio data */ iov_iter_zero(~0U, &iter); /* takes care of potential aliasing */ flush_dcache_folio(folio); if (ret > 0) ret = 0; folio_end_read(folio, ret == 0); return ret; } static int orangefs_write_begin(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { struct orangefs_write_range *wr; struct folio *folio; int ret; folio = __filemap_get_folio(mapping, pos / PAGE_SIZE, FGP_WRITEBEGIN, mapping_gfp_mask(mapping)); if (IS_ERR(folio)) return PTR_ERR(folio); *foliop = folio; if (folio_test_dirty(folio) && !folio_test_private(folio)) { /* * Should be impossible. If it happens, launder the page * since we don't know what's dirty. This will WARN in * orangefs_writepage_locked. */ ret = orangefs_launder_folio(folio); if (ret) return ret; } if (folio_test_private(folio)) { struct orangefs_write_range *wr; wr = folio_get_private(folio); if (wr->pos + wr->len == pos && uid_eq(wr->uid, current_fsuid()) && gid_eq(wr->gid, current_fsgid())) { wr->len += len; goto okay; } else { wr->pos = pos; wr->len = len; ret = orangefs_launder_folio(folio); if (ret) return ret; } } wr = kmalloc(sizeof *wr, GFP_KERNEL); if (!wr) return -ENOMEM; wr->pos = pos; wr->len = len; wr->uid = current_fsuid(); wr->gid = current_fsgid(); folio_attach_private(folio, wr); okay: return 0; } static int orangefs_write_end(const struct kiocb *iocb, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { struct inode *inode = folio->mapping->host; loff_t last_pos = pos + copied; /* * No need to use i_size_read() here, the i_size * cannot change under us because we hold the i_mutex. */ if (last_pos > inode->i_size) i_size_write(inode, last_pos); /* zero the stale part of the folio if we did a short copy */ if (!folio_test_uptodate(folio)) { unsigned from = pos & (PAGE_SIZE - 1); if (copied < len) { folio_zero_range(folio, from + copied, len - copied); } /* Set fully written pages uptodate. */ if (pos == folio_pos(folio) && (len == PAGE_SIZE || pos + len == inode->i_size)) { folio_zero_segment(folio, from + copied, PAGE_SIZE); folio_mark_uptodate(folio); } } folio_mark_dirty(folio); folio_unlock(folio); folio_put(folio); mark_inode_dirty_sync(file_inode(iocb->ki_filp)); return copied; } static void orangefs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct orangefs_write_range *wr = folio_get_private(folio); if (offset == 0 && length == PAGE_SIZE) { kfree(folio_detach_private(folio)); return; /* write range entirely within invalidate range (or equal) */ } else if (folio_pos(folio) + offset <= wr->pos && wr->pos + wr->len <= folio_pos(folio) + offset + length) { kfree(folio_detach_private(folio)); /* XXX is this right? 
only caller in fs */ folio_cancel_dirty(folio); return; /* invalidate range chops off end of write range */ } else if (wr->pos < folio_pos(folio) + offset && wr->pos + wr->len <= folio_pos(folio) + offset + length && folio_pos(folio) + offset < wr->pos + wr->len) { size_t x; x = wr->pos + wr->len - (folio_pos(folio) + offset); WARN_ON(x > wr->len); wr->len -= x; wr->uid = current_fsuid(); wr->gid = current_fsgid(); /* invalidate range chops off beginning of write range */ } else if (folio_pos(folio) + offset <= wr->pos && folio_pos(folio) + offset + length < wr->pos + wr->len && wr->pos < folio_pos(folio) + offset + length) { size_t x; x = folio_pos(folio) + offset + length - wr->pos; WARN_ON(x > wr->len); wr->pos += x; wr->len -= x; wr->uid = current_fsuid(); wr->gid = current_fsgid(); /* invalidate range entirely within write range (punch hole) */ } else if (wr->pos < folio_pos(folio) + offset && folio_pos(folio) + offset + length < wr->pos + wr->len) { /* XXX what do we do here... should not WARN_ON */ WARN_ON(1); /* punch hole */ /* * should we just ignore this and write it out anyway? * it hardly makes sense */ return; /* non-overlapping ranges */ } else { /* WARN if they do overlap */ if (!((folio_pos(folio) + offset + length <= wr->pos) ^ (wr->pos + wr->len <= folio_pos(folio) + offset))) { WARN_ON(1); printk("invalidate range offset %llu length %zu\n", folio_pos(folio) + offset, length); printk("write range offset %llu length %zu\n", wr->pos, wr->len); } return; } /* * Above there are returns where wr is freed or where we WARN. * Thus the following runs if wr was modified above. */ orangefs_launder_folio(folio); } static bool orangefs_release_folio(struct folio *folio, gfp_t foo) { return !folio_test_private(folio); } static void orangefs_free_folio(struct folio *folio) { kfree(folio_detach_private(folio)); } static int orangefs_launder_folio(struct folio *folio) { int r = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, }; folio_wait_writeback(folio); if (folio_clear_dirty_for_io(folio)) { r = orangefs_writepage_locked(folio, &wbc); folio_end_writeback(folio); } return r; } static ssize_t orangefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { /* * Comment from original do_readv_writev: * Common entry point for read/write/readv/writev * This function will dispatch it to either the direct I/O * or buffered I/O path depending on the mount options and/or * augmented/extended metadata attached to the file. * Note: File extended attributes override any mount options. */ struct file *file = iocb->ki_filp; loff_t pos = iocb->ki_pos; enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ? 
ORANGEFS_IO_WRITE : ORANGEFS_IO_READ; loff_t *offset = &pos; struct inode *inode = file->f_mapping->host; struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_khandle *handle = &orangefs_inode->refn.khandle; size_t count = iov_iter_count(iter); ssize_t total_count = 0; ssize_t ret = -EINVAL; gossip_debug(GOSSIP_FILE_DEBUG, "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n", __func__, handle, (int)count); if (type == ORANGEFS_IO_WRITE) { gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): proceeding with offset : %llu, " "size %d\n", __func__, handle, llu(*offset), (int)count); } if (count == 0) { ret = 0; goto out; } while (iov_iter_count(iter)) { size_t each_count = iov_iter_count(iter); size_t amt_complete; /* how much to transfer in this loop iteration */ if (each_count > orangefs_bufmap_size_query()) each_count = orangefs_bufmap_size_query(); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): size of each_count(%d)\n", __func__, handle, (int)each_count); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): BEFORE wait_for_io: offset is %d\n", __func__, handle, (int)*offset); ret = wait_for_direct_io(type, inode, offset, iter, each_count, 0, NULL, NULL, file); gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): return from wait_for_io:%d\n", __func__, handle, (int)ret); if (ret < 0) goto out; *offset += ret; total_count += ret; amt_complete = ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): AFTER wait_for_io: offset is %d\n", __func__, handle, (int)*offset); /* * if we got a short I/O operations, * fall out and return what we got so far */ if (amt_complete < each_count) break; } /*end while */ out: if (total_count > 0) ret = total_count; if (ret > 0) { if (type == ORANGEFS_IO_READ) { file_accessed(file); } else { file_update_time(file); if (*offset > i_size_read(inode)) i_size_write(inode, *offset); } } gossip_debug(GOSSIP_FILE_DEBUG, "%s(%pU): Value(%d) returned.\n", __func__, handle, (int)ret); return ret; } /** ORANGEFS2 implementation of address space operations */ static const struct address_space_operations orangefs_address_operations = { .readahead = orangefs_readahead, .read_folio = orangefs_read_folio, .writepages = orangefs_writepages, .dirty_folio = filemap_dirty_folio, .write_begin = orangefs_write_begin, .write_end = orangefs_write_end, .invalidate_folio = orangefs_invalidate_folio, .release_folio = orangefs_release_folio, .free_folio = orangefs_free_folio, .migrate_folio = filemap_migrate_folio, .launder_folio = orangefs_launder_folio, .direct_IO = orangefs_direct_IO, }; vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); unsigned long *bitlock = &orangefs_inode->bitlock; vm_fault_t ret; struct orangefs_write_range *wr; sb_start_pagefault(inode->i_sb); if (wait_on_bit(bitlock, 1, TASK_KILLABLE)) { ret = VM_FAULT_RETRY; goto out; } folio_lock(folio); if (folio_test_dirty(folio) && !folio_test_private(folio)) { /* * Should be impossible. If it happens, launder the folio * since we don't know what's dirty. This will WARN in * orangefs_writepage_locked. 
*/ if (orangefs_launder_folio(folio)) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } } if (folio_test_private(folio)) { wr = folio_get_private(folio); if (uid_eq(wr->uid, current_fsuid()) && gid_eq(wr->gid, current_fsgid())) { wr->pos = page_offset(vmf->page); wr->len = PAGE_SIZE; goto okay; } else { if (orangefs_launder_folio(folio)) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } } } wr = kmalloc(sizeof *wr, GFP_KERNEL); if (!wr) { ret = VM_FAULT_LOCKED|VM_FAULT_RETRY; goto out; } wr->pos = page_offset(vmf->page); wr->len = PAGE_SIZE; wr->uid = current_fsuid(); wr->gid = current_fsgid(); folio_attach_private(folio, wr); okay: file_update_time(vmf->vma->vm_file); if (folio->mapping != inode->i_mapping) { folio_unlock(folio); ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE; goto out; } /* * We mark the folio dirty already here so that when freeze is in * progress, we are guaranteed that writeback during freezing will * see the dirty folio and writeprotect it again. */ folio_mark_dirty(folio); folio_wait_stable(folio); ret = VM_FAULT_LOCKED; out: sb_end_pagefault(inode->i_sb); return ret; } static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct orangefs_kernel_op_s *new_op; loff_t orig_size; int ret = -EINVAL; gossip_debug(GOSSIP_INODE_DEBUG, "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n", __func__, get_khandle_from_ino(inode), &orangefs_inode->refn.khandle, orangefs_inode->refn.fs_id, iattr->ia_size); /* Ensure that we have a up to date size, so we know if it changed. */ ret = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_SIZE); if (ret == -ESTALE) ret = -EIO; if (ret) { gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n", __func__, ret); return ret; } orig_size = i_size_read(inode); /* This is truncate_setsize in a different order. 
*/ truncate_pagecache(inode, iattr->ia_size); i_size_write(inode, iattr->ia_size); if (iattr->ia_size > orig_size) pagecache_isize_extended(inode, orig_size, iattr->ia_size); new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE); if (!new_op) return -ENOMEM; new_op->upcall.req.truncate.refn = orangefs_inode->refn; new_op->upcall.req.truncate.size = (__s64) iattr->ia_size; ret = service_operation(new_op, __func__, get_interruptible_flag(inode)); /* * the truncate has no downcall members to retrieve, but * the status value tells us if it went through ok or not */ gossip_debug(GOSSIP_INODE_DEBUG, "%s: ret:%d:\n", __func__, ret); op_release(new_op); if (ret != 0) return ret; if (orig_size != i_size_read(inode)) iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; return ret; } int __orangefs_setattr(struct inode *inode, struct iattr *iattr) { int ret; if (iattr->ia_valid & ATTR_MODE) { if (iattr->ia_mode & (S_ISVTX)) { if (is_root_handle(inode)) { /* * allow sticky bit to be set on root (since * it shows up that way by default anyhow), * but don't show it to the server */ iattr->ia_mode -= S_ISVTX; } else { gossip_debug(GOSSIP_UTILS_DEBUG, "User attempted to set sticky bit on non-root directory; returning EINVAL.\n"); ret = -EINVAL; goto out; } } if (iattr->ia_mode & (S_ISUID)) { gossip_debug(GOSSIP_UTILS_DEBUG, "Attempting to set setuid bit (not supported); returning EINVAL.\n"); ret = -EINVAL; goto out; } } if (iattr->ia_valid & ATTR_SIZE) { ret = orangefs_setattr_size(inode, iattr); if (ret) goto out; } again: spin_lock(&inode->i_lock); if (ORANGEFS_I(inode)->attr_valid) { if (uid_eq(ORANGEFS_I(inode)->attr_uid, current_fsuid()) && gid_eq(ORANGEFS_I(inode)->attr_gid, current_fsgid())) { ORANGEFS_I(inode)->attr_valid = iattr->ia_valid; } else { spin_unlock(&inode->i_lock); write_inode_now(inode, 1); goto again; } } else { ORANGEFS_I(inode)->attr_valid = iattr->ia_valid; ORANGEFS_I(inode)->attr_uid = current_fsuid(); ORANGEFS_I(inode)->attr_gid = current_fsgid(); } setattr_copy(&nop_mnt_idmap, inode, iattr); spin_unlock(&inode->i_lock); mark_inode_dirty(inode); ret = 0; out: return ret; } int __orangefs_setattr_mode(struct dentry *dentry, struct iattr *iattr) { int ret; struct inode *inode = d_inode(dentry); ret = __orangefs_setattr(inode, iattr); /* change mode on a file that has ACLs */ if (!ret && (iattr->ia_valid & ATTR_MODE)) ret = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode); return ret; } /* * Change attributes of an object referenced by dentry. */ int orangefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { int ret; gossip_debug(GOSSIP_INODE_DEBUG, "__orangefs_setattr: called on %pd\n", dentry); ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr); if (ret) goto out; ret = __orangefs_setattr_mode(dentry, iattr); sync_inode_metadata(d_inode(dentry), 1); out: gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret); return ret; } /* * Obtain attributes of an object given a dentry */ int orangefs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { int ret; struct inode *inode = path->dentry->d_inode; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_getattr: called on %pd mask %u\n", path->dentry, request_mask); ret = orangefs_inode_getattr(inode, request_mask & STATX_SIZE ? 
ORANGEFS_GETATTR_SIZE : 0); if (ret == 0) { generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); /* override block size reported to stat */ if (!(request_mask & STATX_SIZE)) stat->result_mask &= ~STATX_SIZE; generic_fill_statx_attr(inode, stat); } return ret; } int orangefs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { int ret; if (mask & MAY_NOT_BLOCK) return -ECHILD; gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__); /* Make sure the permission (and other common attrs) are up to date. */ ret = orangefs_inode_getattr(inode, 0); if (ret < 0) return ret; return generic_permission(&nop_mnt_idmap, inode, mask); } int orangefs_update_time(struct inode *inode, int flags) { struct iattr iattr; gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n", get_khandle_from_ino(inode)); flags = generic_update_time(inode, flags); memset(&iattr, 0, sizeof iattr); if (flags & S_ATIME) iattr.ia_valid |= ATTR_ATIME; if (flags & S_CTIME) iattr.ia_valid |= ATTR_CTIME; if (flags & S_MTIME) iattr.ia_valid |= ATTR_MTIME; return __orangefs_setattr(inode, &iattr); } static int orangefs_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { u64 val = 0; int ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__, dentry); ret = orangefs_inode_getxattr(d_inode(dentry), "user.pvfs2.meta_hint", &val, sizeof(val)); if (ret < 0 && ret != -ENODATA) return ret; gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val); fileattr_fill_flags(fa, val); return 0; } static int orangefs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct file_kattr *fa) { u64 val = 0; gossip_debug(GOSSIP_FILE_DEBUG, "%s: called on %pd\n", __func__, dentry); /* * ORANGEFS_MIRROR_FL is set internally when the mirroring mode is * turned on for a file. The user is not allowed to turn on this bit, * but the bit is present if the user first gets the flags and then * updates the flags with some new settings. So, we ignore it in the * following edit. bligon. 
*/ if (fileattr_has_fsx(fa) || (fa->flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL | ORANGEFS_MIRROR_FL))) { gossip_err("%s: only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n", __func__); return -EOPNOTSUPP; } val = fa->flags; gossip_debug(GOSSIP_FILE_DEBUG, "%s: flags=%u\n", __func__, (u32) val); return orangefs_inode_setxattr(d_inode(dentry), "user.pvfs2.meta_hint", &val, sizeof(val), 0); } /* ORANGEFS2 implementation of VFS inode operations for files */ static const struct inode_operations orangefs_file_inode_operations = { .get_inode_acl = orangefs_get_acl, .set_acl = orangefs_set_acl, .setattr = orangefs_setattr, .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, .update_time = orangefs_update_time, .fileattr_get = orangefs_fileattr_get, .fileattr_set = orangefs_fileattr_set, }; static int orangefs_init_iops(struct inode *inode) { inode->i_mapping->a_ops = &orangefs_address_operations; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &orangefs_file_inode_operations; inode->i_fop = &orangefs_file_operations; break; case S_IFLNK: inode->i_op = &orangefs_symlink_inode_operations; break; case S_IFDIR: inode->i_op = &orangefs_dir_inode_operations; inode->i_fop = &orangefs_dir_operations; break; default: gossip_debug(GOSSIP_INODE_DEBUG, "%s: unsupported mode\n", __func__); return -EINVAL; } return 0; } /* * Given an ORANGEFS object identifier (fsid, handle), convert it into * a ino_t type that will be used as a hash-index from where the handle will * be searched for in the VFS hash table of inodes. */ static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref) { if (!ref) return 0; return orangefs_khandle_to_ino(&(ref->khandle)); } /* * Called to set up an inode from iget5_locked. */ static int orangefs_set_inode(struct inode *inode, void *data) { struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; ORANGEFS_I(inode)->refn.fs_id = ref->fs_id; ORANGEFS_I(inode)->refn.khandle = ref->khandle; ORANGEFS_I(inode)->attr_valid = 0; hash_init(ORANGEFS_I(inode)->xattr_cache); ORANGEFS_I(inode)->mapping_time = jiffies - 1; ORANGEFS_I(inode)->bitlock = 0; return 0; } /* * Called to determine if handles match. */ static int orangefs_test_inode(struct inode *inode, void *data) { struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data; struct orangefs_inode_s *orangefs_inode = NULL; orangefs_inode = ORANGEFS_I(inode); /* test handles and fs_ids... */ return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle)) && orangefs_inode->refn.fs_id == ref->fs_id); } /* * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS * file handle. * * @sb: the file system super block instance. * @ref: The ORANGEFS object for which we are trying to locate an inode. 
*/ struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref) { struct inode *inode = NULL; unsigned long hash; int error; hash = orangefs_handle_hash(ref); inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW); if (error) { iget_failed(inode); return ERR_PTR(error); } inode->i_ino = hash; /* needed for stat etc */ orangefs_init_iops(inode); unlock_new_inode(inode); gossip_debug(GOSSIP_INODE_DEBUG, "iget handle %pU, fsid %d hash %ld i_ino %lu\n", &ref->khandle, ref->fs_id, hash, inode->i_ino); return inode; } /* * Allocate an inode for a newly created file and insert it into the inode hash. */ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir, umode_t mode, dev_t dev, struct orangefs_object_kref *ref) { struct posix_acl *acl = NULL, *default_acl = NULL; unsigned long hash = orangefs_handle_hash(ref); struct inode *inode; int error; gossip_debug(GOSSIP_INODE_DEBUG, "%s:(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n", __func__, sb, MAJOR(dev), MINOR(dev), mode); inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) goto out_iput; orangefs_set_inode(inode, ref); inode->i_ino = hash; /* needed for stat etc */ error = orangefs_inode_getattr(inode, ORANGEFS_GETATTR_NEW); if (error) goto out_iput; orangefs_init_iops(inode); inode->i_rdev = dev; if (default_acl) { error = __orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); if (error) goto out_iput; } if (acl) { error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (error) goto out_iput; } error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); if (error < 0) goto out_iput; gossip_debug(GOSSIP_INODE_DEBUG, "Initializing ACL's for inode %pU\n", get_khandle_from_ino(inode)); if (mode != inode->i_mode) { struct iattr iattr = { .ia_mode = mode, .ia_valid = ATTR_MODE, }; inode->i_mode = mode; __orangefs_setattr(inode, &iattr); __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); } posix_acl_release(acl); posix_acl_release(default_acl); return inode; out_iput: iput(inode); posix_acl_release(acl); posix_acl_release(default_acl); return ERR_PTR(error); } |
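The writepages path above only keeps adding folios to the pending batch while they describe one contiguous byte range written under one set of credentials; orangefs_writepages_callback() flushes the batch as soon as either condition stops holding. A standalone sketch of that merge rule follows; all names in it are illustrative and are not OrangeFS code.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative mirror of the batching rule in orangefs_writepages_callback():
 * a pending range and the next folio's range may be merged only if the
 * credentials match and the next range starts exactly where the pending one
 * ends; anything else forces the pending batch to be written out first. */
struct example_write_range {
	long long pos;		/* byte offset in the file */
	size_t len;		/* length of the dirty range */
	unsigned int uid, gid;	/* credentials that dirtied it */
};

static bool example_can_merge(const struct example_write_range *pending,
			      const struct example_write_range *next)
{
	return pending->uid == next->uid &&
	       pending->gid == next->gid &&
	       pending->pos + (long long)pending->len == next->pos;
}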
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP virtual clock driver
 *
 * Copyright 2021 NXP
 */
#include <linux/slab.h>
#include <linux/hashtable.h>
#include "ptp_private.h"

#define PTP_VCLOCK_CC_SHIFT		31
#define PTP_VCLOCK_CC_MULT		(1 << PTP_VCLOCK_CC_SHIFT)
#define PTP_VCLOCK_FADJ_SHIFT		9
#define PTP_VCLOCK_FADJ_DENOMINATOR	15625ULL
#define PTP_VCLOCK_REFRESH_INTERVAL	(HZ * 2)

/* protects vclock_hash addition/deletion */
static DEFINE_SPINLOCK(vclock_hash_lock);

static DEFINE_READ_MOSTLY_HASHTABLE(vclock_hash, 8);

static void ptp_vclock_hash_add(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_add_head_rcu(&vclock->vclock_hash_node,
			   &vclock_hash[vclock->clock->index % HASH_SIZE(vclock_hash)]);

	spin_unlock(&vclock_hash_lock);
}

static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
{
	spin_lock(&vclock_hash_lock);

	hlist_del_init_rcu(&vclock->vclock_hash_node);

	spin_unlock(&vclock_hash_lock);

	synchronize_rcu();
}

static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	s64 adj;

	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	timecounter_read(&vclock->tc);
	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
	mutex_unlock(&vclock->lock);

	return 0;
}

static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	timecounter_adjtime(&vclock->tc, delta);
	mutex_unlock(&vclock->lock);

	return 0;
}

static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
			      struct timespec64 *ts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	u64 ns;

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	ns = timecounter_read(&vclock->tc);
	mutex_unlock(&vclock->lock);
	*ts = ns_to_timespec64(ns);

	return 0;
}

static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
			       struct timespec64 *ts,
			       struct ptp_system_timestamp *sts)
{
	struct ptp_vclock *vclock = info_to_vclock(ptp);
	struct ptp_clock *pptp = vclock->pclock;
	struct timespec64 pts;
	int err;
	u64 ns;

	err = pptp->info->getcyclesx64(pptp->info, &pts, sts);
	if (err)
		return err;

	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;
	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
	mutex_unlock(&vclock->lock);

	*ts = ns_to_timespec64(ns);

	return 0;
}
static int ptp_vclock_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) { struct ptp_vclock *vclock = info_to_vclock(ptp); u64 ns = timespec64_to_ns(ts); if (mutex_lock_interruptible(&vclock->lock)) return -EINTR; timecounter_init(&vclock->tc, &vclock->cc, ns); mutex_unlock(&vclock->lock); return 0; } static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp, struct system_device_crosststamp *xtstamp) { struct ptp_vclock *vclock = info_to_vclock(ptp); struct ptp_clock *pptp = vclock->pclock; int err; u64 ns; err = pptp->info->getcrosscycles(pptp->info, xtstamp); if (err) return err; if (mutex_lock_interruptible(&vclock->lock)) return -EINTR; ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device)); mutex_unlock(&vclock->lock); xtstamp->device = ns_to_ktime(ns); return 0; } static long ptp_vclock_refresh(struct ptp_clock_info *ptp) { struct ptp_vclock *vclock = info_to_vclock(ptp); struct timespec64 ts; ptp_vclock_gettime(&vclock->info, &ts); return PTP_VCLOCK_REFRESH_INTERVAL; } static void ptp_vclock_set_subclass(struct ptp_clock *ptp) { lockdep_set_subclass(&ptp->clock.rwsem, PTP_LOCK_VIRTUAL); } static const struct ptp_clock_info ptp_vclock_info = { .owner = THIS_MODULE, .name = "ptp virtual clock", .max_adj = 500000000, .adjfine = ptp_vclock_adjfine, .adjtime = ptp_vclock_adjtime, .settime64 = ptp_vclock_settime, .do_aux_work = ptp_vclock_refresh, }; static u64 ptp_vclock_read(struct cyclecounter *cc) { struct ptp_vclock *vclock = cc_to_vclock(cc); struct ptp_clock *ptp = vclock->pclock; struct timespec64 ts = {}; ptp->info->getcycles64(ptp->info, &ts); return timespec64_to_ns(&ts); } static const struct cyclecounter ptp_vclock_cc = { .read = ptp_vclock_read, .mask = CYCLECOUNTER_MASK(32), .mult = PTP_VCLOCK_CC_MULT, .shift = PTP_VCLOCK_CC_SHIFT, }; struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) { struct ptp_vclock *vclock; vclock = kzalloc(sizeof(*vclock), GFP_KERNEL); if (!vclock) return NULL; vclock->pclock = pclock; vclock->info = ptp_vclock_info; if (pclock->info->getcyclesx64) vclock->info.gettimex64 = ptp_vclock_gettimex; else vclock->info.gettime64 = ptp_vclock_gettime; if (pclock->info->getcrosscycles) vclock->info.getcrosststamp = ptp_vclock_getcrosststamp; vclock->cc = ptp_vclock_cc; snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", pclock->index); INIT_HLIST_NODE(&vclock->vclock_hash_node); mutex_init(&vclock->lock); vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); if (IS_ERR_OR_NULL(vclock->clock)) { kfree(vclock); return NULL; } ptp_vclock_set_subclass(vclock->clock); timecounter_init(&vclock->tc, &vclock->cc, 0); ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); ptp_vclock_hash_add(vclock); return vclock; } void ptp_vclock_unregister(struct ptp_vclock *vclock) { ptp_vclock_hash_del(vclock); ptp_clock_unregister(vclock->clock); kfree(vclock); } #if IS_BUILTIN(CONFIG_PTP_1588_CLOCK) int ptp_get_vclocks_index(int pclock_index, int **vclock_index) { char name[PTP_CLOCK_NAME_LEN] = ""; struct ptp_clock *ptp; struct device *dev; int num = 0; if (pclock_index < 0) return num; snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index); dev = class_find_device_by_name(&ptp_class, name); if (!dev) return num; ptp = dev_get_drvdata(dev); if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) { put_device(dev); return num; } *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL); if (!(*vclock_index)) goto out; memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks); 
num = ptp->n_vclocks; out: mutex_unlock(&ptp->n_vclocks_mux); put_device(dev); return num; } EXPORT_SYMBOL(ptp_get_vclocks_index); ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) { unsigned int hash = vclock_index % HASH_SIZE(vclock_hash); struct ptp_vclock *vclock; u64 ns; u64 vclock_ns = 0; ns = ktime_to_ns(*hwtstamp); rcu_read_lock(); hlist_for_each_entry_rcu(vclock, &vclock_hash[hash], vclock_hash_node) { if (vclock->clock->index != vclock_index) continue; if (mutex_lock_interruptible(&vclock->lock)) break; vclock_ns = timecounter_cyc2time(&vclock->tc, ns); mutex_unlock(&vclock->lock); break; } rcu_read_unlock(); return ns_to_ktime(vclock_ns); } EXPORT_SYMBOL(ptp_convert_timestamp); #endif |
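A note on the adjfine constants used above: scaled_ppm is a frequency offset in parts per million with a 16-bit fractional part, and the cyclecounter multiplier is scaled by 2^31 (PTP_VCLOCK_CC_SHIFT), so the multiplier correction works out to scaled_ppm * 2^31 / (10^6 * 2^16) = (scaled_ppm << 9) / 15625, which is where PTP_VCLOCK_FADJ_SHIFT and PTP_VCLOCK_FADJ_DENOMINATOR come from. A small standalone check of that arithmetic (userspace sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	long scaled_ppm = 65536;	/* exactly +1 ppm (16-bit fraction) */
	int64_t adj = ((int64_t)scaled_ppm << 9) / 15625;

	/* +1 ppm of a 2^31-scaled multiplier is 2^31 / 10^6 (truncated) */
	assert(adj == (int64_t)(UINT64_C(1) << 31) / 1000000);
	return 0;
}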
// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

enum kpage_operation {
	KPAGE_FLAGS,
	KPAGE_COUNT,
	KPAGE_CGROUP,
};

static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}

static u64 get_kpage_count(const struct page *page)
{
	struct page_snapshot ps;
	u64 ret;

	snapshot_page(&ps, page);

	if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
		ret = folio_precise_page_mapcount(&ps.folio_snapshot,
						  &ps.page_snapshot);
	else
		ret = folio_average_page_mapcount(&ps.folio_snapshot);

	return ret;
}

static ssize_t kpage_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos,
		enum kpage_operation op)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *page;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 info;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
*/ page = pfn_to_online_page(pfn); if (page) { switch (op) { case KPAGE_FLAGS: info = stable_page_flags(page); break; case KPAGE_COUNT: info = get_kpage_count(page); break; case KPAGE_CGROUP: info = page_cgroup_ino(page); break; default: info = 0; break; } } else info = 0; if (put_user(info, out)) { ret = -EFAULT; break; } pfn++; out++; count -= KPMSIZE; cond_resched(); } *ppos += (char __user *)out - buf; if (!ret) ret = (char __user *)out - buf; return ret; } /* /proc/kpagecount - an array exposing page mapcounts * * Each entry is a u64 representing the corresponding * physical page mapcount. */ static ssize_t kpagecount_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return kpage_read(file, buf, count, ppos, KPAGE_COUNT); } static const struct proc_ops kpagecount_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_lseek = mem_lseek, .proc_read = kpagecount_read, }; static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit) { return ((kflags >> kbit) & 1) << ubit; } u64 stable_page_flags(const struct page *page) { const struct folio *folio; struct page_snapshot ps; unsigned long k; unsigned long mapping; bool is_anon; u64 u = 0; /* * pseudo flag: KPF_NOPAGE * it differentiates a memory hole from a page with no flags */ if (!page) return 1 << KPF_NOPAGE; snapshot_page(&ps, page); folio = &ps.folio_snapshot; k = folio->flags.f; mapping = (unsigned long)folio->mapping; is_anon = mapping & FOLIO_MAPPING_ANON; /* * pseudo flags for the well known (anonymous) memory mapped pages */ if (folio_mapped(folio)) u |= 1 << KPF_MMAP; if (is_anon) { u |= 1 << KPF_ANON; if (mapping & FOLIO_MAPPING_KSM) u |= 1 << KPF_KSM; } /* * compound pages: export both head/tail info * they together define a compound page's start/end pos and order */ if (ps.idx == 0) u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head); else u |= 1 << KPF_COMPOUND_TAIL; if (folio_test_hugetlb(folio)) u |= 1 << KPF_HUGE; else if (folio_test_large(folio) && folio_test_large_rmappable(folio)) { /* Note: we indicate any THPs here, not just PMD-sized ones */ u |= 1 << KPF_THP; } else if (is_huge_zero_pfn(ps.pfn)) { u |= 1 << KPF_ZERO_PAGE; u |= 1 << KPF_THP; } else if (is_zero_pfn(ps.pfn)) { u |= 1 << KPF_ZERO_PAGE; } if (ps.flags & PAGE_SNAPSHOT_PG_BUDDY) u |= 1 << KPF_BUDDY; if (folio_test_offline(folio)) u |= 1 << KPF_OFFLINE; if (folio_test_pgtable(folio)) u |= 1 << KPF_PGTABLE; if (folio_test_slab(folio)) u |= 1 << KPF_SLAB; #if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) u |= kpf_copy_bit(k, KPF_IDLE, PG_idle); #else if (ps.flags & PAGE_SNAPSHOT_PG_IDLE) u |= 1 << KPF_IDLE; #endif u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty); u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate); u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback); u |= kpf_copy_bit(k, KPF_LRU, PG_lru); u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced); u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); #define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache)) if ((k & SWAPCACHE) == SWAPCACHE) u |= 1 << KPF_SWAPCACHE; u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable); u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); #ifdef CONFIG_MEMORY_FAILURE if (u & (1 << KPF_HUGE)) u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); else u |= kpf_copy_bit(ps.page_snapshot.flags.f, KPF_HWPOISON, PG_hwpoison); #endif u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved); u |= kpf_copy_bit(k, KPF_OWNER_2, PG_owner_2); 
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
}
EXPORT_SYMBOL_GPL(stable_page_flags);

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	return kpage_read(file, buf, count, ppos, KPAGE_FLAGS);
}

static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};

#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	return kpage_read(file, buf, count, ppos, KPAGE_CGROUP);
}

static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);
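/*
 * Illustrative userspace sketch (not part of fs/proc/page.c above): it shows
 * how the /proc/kpagecount and /proc/kpageflags arrays described above are
 * consumed.  Each file is an array of u64 entries indexed by PFN, so the
 * value for page frame `pfn' sits at byte offset pfn * 8.  The KPF_* bit
 * numbers come from the uapi header <linux/kernel-page-flags.h>; the file
 * name "kpage.c", the read_u64() helper and the flags printed are invented
 * for illustration.  The proc files are created 0400 (S_IRUSR), so this
 * normally has to run as root.  Build with: cc -O2 -o kpage kpage.c
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/kernel-page-flags.h>

static uint64_t read_u64(const char *path, uint64_t pfn)
{
	uint64_t val;
	int fd = open(path, O_RDONLY);

	/* Each entry is 8 bytes; seek directly to the requested PFN. */
	if (fd < 0 || pread(fd, &val, sizeof(val), pfn * sizeof(val)) != sizeof(val)) {
		perror(path);
		exit(1);
	}
	close(fd);
	return val;
}

int main(int argc, char **argv)
{
	uint64_t pfn, flags, count;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pfn>\n", argv[0]);
		return 1;
	}
	pfn = strtoull(argv[1], NULL, 0);

	flags = read_u64("/proc/kpageflags", pfn);
	count = read_u64("/proc/kpagecount", pfn);

	printf("pfn %#llx: flags %#llx mapcount %llu\n",
	       (unsigned long long)pfn, (unsigned long long)flags,
	       (unsigned long long)count);
	if (flags & (1ULL << KPF_NOPAGE))
		printf("  memory hole (KPF_NOPAGE)\n");
	if (flags & (1ULL << KPF_BUDDY))
		printf("  free page in the buddy allocator (KPF_BUDDY)\n");
	if (flags & (1ULL << KPF_THP))
		printf("  part of a transparent huge page (KPF_THP)\n");
	return 0;
}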
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.gid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.projid_map = {
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
			.nr_extents = 1,
		},
	},
	.ns.ns_type = ns_common_type(&init_user_ns),
	.ns.__ns_ref = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = ns_init_inum(&init_user_ns),
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(IS_ENABLED(CONFIG_BASE_SMALL) ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
* But free_uid() is also called with local interrupts disabled, and running * local_bh_enable() with local interrupts disabled is an error - we'll run * softirq callbacks, and they can unconditionally enable interrupts, and * the caller of free_uid() didn't expect that.. */ static DEFINE_SPINLOCK(uidhash_lock); /* root_user.__count is 1, for init task cred */ struct user_struct root_user = { .__count = REFCOUNT_INIT(1), .uid = GLOBAL_ROOT_UID, .ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0), }; /* * These routines must be called with the uidhash spinlock held! */ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent) { hlist_add_head(&up->uidhash_node, hashent); } static void uid_hash_remove(struct user_struct *up) { hlist_del_init(&up->uidhash_node); } static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) { struct user_struct *user; hlist_for_each_entry(user, hashent, uidhash_node) { if (uid_eq(user->uid, uid)) { refcount_inc(&user->__count); return user; } } return NULL; } static int user_epoll_alloc(struct user_struct *up) { #ifdef CONFIG_EPOLL return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL); #else return 0; #endif } static void user_epoll_free(struct user_struct *up) { #ifdef CONFIG_EPOLL percpu_counter_destroy(&up->epoll_watches); #endif } /* IRQs are disabled and uidhash_lock is held upon function entry. * IRQ state (as stored in flags) is restored and uidhash_lock released * upon function exit. */ static void free_user(struct user_struct *up, unsigned long flags) __releases(&uidhash_lock) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); user_epoll_free(up); kmem_cache_free(uid_cachep, up); } /* * Locate the user_struct for the passed UID. If found, take a ref on it. The * caller must undo that ref with free_uid(). * * If the user_struct could not be found, return NULL. */ struct user_struct *find_user(kuid_t uid) { struct user_struct *ret; unsigned long flags; spin_lock_irqsave(&uidhash_lock, flags); ret = uid_hash_find(uid, uidhashentry(uid)); spin_unlock_irqrestore(&uidhash_lock, flags); return ret; } void free_uid(struct user_struct *up) { unsigned long flags; if (!up) return; if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags)) free_user(up, flags); } EXPORT_SYMBOL_GPL(free_uid); struct user_struct *alloc_uid(kuid_t uid) { struct hlist_head *hashent = uidhashentry(uid); struct user_struct *up, *new; spin_lock_irq(&uidhash_lock); up = uid_hash_find(uid, hashent); spin_unlock_irq(&uidhash_lock); if (!up) { new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL); if (!new) return NULL; new->uid = uid; refcount_set(&new->__count, 1); if (user_epoll_alloc(new)) { kmem_cache_free(uid_cachep, new); return NULL; } ratelimit_state_init(&new->ratelimit, HZ, 100); ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE); /* * Before adding this, check whether we raced * on adding the same user already.. 
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);
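/*
 * Illustrative userspace sketch (not kernel code): a re-implementation of the
 * bucket computation behind uidhashentry() above, assuming the
 * !CONFIG_BASE_SMALL case where UIDHASH_BITS is 7 (128 buckets).  It only
 * demonstrates how __uidhashfn() folds a UID into a hash bucket; the kernel
 * additionally extracts the raw value from a kuid_t with __kuid_val() before
 * hashing.  The uids[] sample values are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

#define UIDHASH_BITS	7
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)

static unsigned int uidhashfn(uint32_t uid)
{
	/* Same folding as __uidhashfn(): mix the high bits into the low ones. */
	return ((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK;
}

int main(void)
{
	uint32_t uids[] = { 0, 1000, 1001, 65534, 100000 };
	size_t i;

	for (i = 0; i < sizeof(uids) / sizeof(uids[0]); i++)
		printf("uid %6u -> bucket %3u of %u\n",
		       uids[i], uidhashfn(uids[i]), UIDHASH_SZ);
	return 0;
}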
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Definitions for key type implementations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef _LINUX_KEY_TYPE_H
#define _LINUX_KEY_TYPE_H

#include <linux/key.h>
#include <linux/errno.h>

#ifdef CONFIG_KEYS

struct kernel_pkey_query;
struct kernel_pkey_params;

/*
 * Pre-parsed payload, used by key add, update and instantiate.
 *
 * This struct will be cleared and data and datalen will be set with the data
 * and length parameters from the caller and quotalen will be set from
 * def_datalen from the key type. Then if the preparse() op is provided by the
 * key type, that will be called. Then the struct will be passed to the
 * instantiate() or the update() op.
 *
 * If the preparse() op is given, the free_preparse() op will be called to
 * clear the contents.
 */
struct key_preparsed_payload {
	const char	*orig_description; /* Actual or proposed description (maybe NULL) */
	char		*description;	/* Proposed key description (or NULL) */
	union key_payload payload;	/* Proposed payload */
	const void	*data;		/* Raw data */
	size_t		datalen;	/* Raw datalen */
	size_t		quotalen;	/* Quota length for proposed payload */
	time64_t	expiry;		/* Expiry time of key */
} __randomize_layout;

typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);

/*
 * Preparsed matching criterion.
 */
struct key_match_data {
	/* Comparison function, defaults to exact description match, but can be
	 * overridden by type->match_preparse(). Should return true if a match
	 * is found and false if not.
	 */
	bool (*cmp)(const struct key *key,
		    const struct key_match_data *match_data);
	const void	*raw_data;	/* Raw match data */
	void		*preparsed;	/* For ->match_preparse() to stash stuff */
	unsigned	lookup_type;	/* Type of lookup for this search. */
#define KEYRING_SEARCH_LOOKUP_DIRECT	0x0000	/* Direct lookup by description. */
#define KEYRING_SEARCH_LOOKUP_ITERATE	0x0001	/* Iterative search. */
};

/*
 * kernel managed key type definition
 */
struct key_type {
	/* name of the type */
	const char *name;

	/* default payload length for quota precalculation (optional)
	 * - this can be used instead of calling key_payload_reserve(), that
	 *   function only needs to be called if the real datalen is different
	 */
	size_t def_datalen;

	unsigned int flags;
#define KEY_TYPE_NET_DOMAIN	0x00000001 /* Keys of this type have a net namespace domain */
#define KEY_TYPE_INSTANT_REAP	0x00000002 /* Keys of this type don't have a delay after expiring */

	/* vet a description */
	int (*vet_description)(const char *description);

	/* Preparse the data blob from userspace that is to be the payload,
	 * generating a proposed description and payload that will be handed to
	 * the instantiate() and update() ops.
	 */
	int (*preparse)(struct key_preparsed_payload *prep);

	/* Free a preparse data structure.
	 */
	void (*free_preparse)(struct key_preparsed_payload *prep);

	/* instantiate a key of this type
	 * - this method should call key_payload_reserve() to determine if the
	 *   user's quota will hold the payload
	 */
	int (*instantiate)(struct key *key, struct key_preparsed_payload *prep);

	/* update a key of this type (optional)
	 * - this method should call key_payload_reserve() to recalculate the
	 *   quota consumption
	 * - the key must be locked against read when modifying
	 */
	int (*update)(struct key *key, struct key_preparsed_payload *prep);

	/* Preparse the data supplied to ->match() (optional). The
	 * data to be preparsed can be found in match_data->raw_data.
	 * The lookup type can also be set by this function.
	 */
	int (*match_preparse)(struct key_match_data *match_data);

	/* Free preparsed match data (optional). This should be supplied if
	 * ->match_preparse() is supplied.
	 */
	void (*match_free)(struct key_match_data *match_data);

	/* clear some of the data from a key on revocation (optional)
	 * - the key's semaphore will be write-locked by the caller
	 */
	void (*revoke)(struct key *key);

	/* clear the data from a key (optional) */
	void (*destroy)(struct key *key);

	/* describe a key */
	void (*describe)(const struct key *key, struct seq_file *p);

	/* read a key's data (optional)
	 * - permission checks will be done by the caller
	 * - the key's semaphore will be readlocked by the caller
	 * - should return the amount of data that could be read, no matter how
	 *   much is copied into the buffer
	 * - shouldn't do the copy if the buffer is NULL
	 */
	long (*read)(const struct key *key, char *buffer, size_t buflen);

	/* handle request_key() for this type instead of invoking
	 * /sbin/request-key (optional)
	 * - key is the key to instantiate
	 * - authkey is the authority to assume when instantiating this key
	 * - op is the operation to be done, usually "create"
	 * - the call must not return until the instantiation process has run
	 *   its course
	 */
	request_key_actor_t request_key;

	/* Look up a keyring access restriction (optional)
	 *
	 * - NULL is a valid return value (meaning the requested restriction
	 *   is known but will never block addition of a key)
	 * - should return -EINVAL if the restriction is unknown
	 */
	struct key_restriction *(*lookup_restriction)(const char *params);

	/* Asymmetric key accessor functions.
	 */
	int (*asym_query)(const struct kernel_pkey_params *params,
			  struct kernel_pkey_query *info);
	int (*asym_eds_op)(struct kernel_pkey_params *params,
			   const void *in, void *out);
	int (*asym_verify_signature)(struct kernel_pkey_params *params,
				     const void *in, const void *in2);

	/* internal fields */
	struct list_head	link;		/* link in types list */
	struct lock_class_key	lock_class;	/* key->sem lock class */
} __randomize_layout;

extern struct key_type key_type_keyring;

extern int register_key_type(struct key_type *ktype);
extern void unregister_key_type(struct key_type *ktype);

extern int key_payload_reserve(struct key *key, size_t datalen);
extern int key_instantiate_and_link(struct key *key,
				    const void *data,
				    size_t datalen,
				    struct key *keyring,
				    struct key *authkey);
extern int key_reject_and_link(struct key *key,
			       unsigned timeout,
			       unsigned error,
			       struct key *keyring,
			       struct key *authkey);
extern void complete_request_key(struct key *authkey, int error);

static inline int key_negate_and_link(struct key *key,
				      unsigned timeout,
				      struct key *keyring,
				      struct key *authkey)
{
	return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey);
}

extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);

#endif /* CONFIG_KEYS */
#endif /* _LINUX_KEY_TYPE_H */
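/*
 * Illustrative sketch (not part of this header) of a module implementing the
 * hooks declared above.  The type name "example", the 256-byte payload cap
 * and the use of payload.data[1] to stash the length are invented for
 * illustration; only register_key_type()/unregister_key_type(),
 * generic_key_instantiate() and the prep->quotalen accounting are the real
 * interfaces described in this header.  generic_key_instantiate() takes
 * ownership of prep->payload on success and clears it, so free_preparse()
 * only frees the copy when instantiation never happened.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/key-type.h>

static int example_preparse(struct key_preparsed_payload *prep)
{
	if (!prep->datalen || prep->datalen > 256)
		return -EINVAL;

	/* Copy the user-supplied blob into the proposed payload. */
	prep->payload.data[0] = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!prep->payload.data[0])
		return -ENOMEM;
	prep->payload.data[1] = (void *)(unsigned long)prep->datalen;
	prep->quotalen = prep->datalen;	/* charged against the owner's key quota */
	return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
	kfree(prep->payload.data[0]);
}

static void example_destroy(struct key *key)
{
	kfree(key->payload.data[0]);
}

static void example_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
	if (key_is_positive(key))
		seq_printf(m, ": %lu", (unsigned long)key->payload.data[1]);
}

static struct key_type key_type_example = {
	.name		= "example",
	.preparse	= example_preparse,
	.free_preparse	= example_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= example_destroy,
	.describe	= example_describe,
};

static int __init example_key_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_key_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_key_init);
module_exit(example_key_exit);
MODULE_LICENSE("GPL");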
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kerberos-based RxRPC security
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <crypto/skcipher.h> #include <linux/module.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/scatterlist.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/key-type.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include <keys/rxrpc-type.h> #include "ar-internal.h" #define RXKAD_VERSION 2 #define MAXKRB5TICKETLEN 1024 #define RXKAD_TKT_TYPE_KERBEROS_V5 256 #define ANAME_SZ 40 /* size of authentication name */ #define INST_SZ 40 /* size of principal's instance */ #define REALM_SZ 40 /* size of principal's auth domain */ #define SNAME_SZ 40 /* size of service name */ #define RXKAD_ALIGN 8 struct rxkad_level1_hdr { __be32 data_size; /* true data size (excluding padding) */ }; struct rxkad_level2_hdr { __be32 data_size; /* true data size (excluding padding) */ __be32 checksum; /* decrypted data checksum */ }; static int rxkad_prime_packet_security(struct rxrpc_connection *conn, struct crypto_sync_skcipher *ci); /* * this holds a pinned cipher so that keventd doesn't get called by the cipher * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE * packets */ static struct crypto_sync_skcipher *rxkad_ci; static struct skcipher_request *rxkad_ci_req; static DEFINE_MUTEX(rxkad_ci_mutex); /* * Parse the information from a server key * * The data should be the 8-byte secret key. */ static int rxkad_preparse_server_key(struct key_preparsed_payload *prep) { struct crypto_skcipher *ci; if (prep->datalen != 8) return -EINVAL; memcpy(&prep->payload.data[2], prep->data, 8); ci = crypto_alloc_skcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(ci)) { _leave(" = %ld", PTR_ERR(ci)); return PTR_ERR(ci); } if (crypto_skcipher_setkey(ci, prep->data, 8) < 0) BUG(); prep->payload.data[0] = ci; _leave(" = 0"); return 0; } static void rxkad_free_preparse_server_key(struct key_preparsed_payload *prep) { if (prep->payload.data[0]) crypto_free_skcipher(prep->payload.data[0]); } static void rxkad_destroy_server_key(struct key *key) { if (key->payload.data[0]) { crypto_free_skcipher(key->payload.data[0]); key->payload.data[0] = NULL; } } /* * initialise connection security */ static int rxkad_init_connection_security(struct rxrpc_connection *conn, struct rxrpc_key_token *token) { struct crypto_sync_skcipher *ci; int ret; _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); conn->security_ix = token->security_index; ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0); if (IS_ERR(ci)) { _debug("no cipher"); ret = PTR_ERR(ci); goto error; } if (crypto_sync_skcipher_setkey(ci, token->kad->session_key, sizeof(token->kad->session_key)) < 0) BUG(); switch (conn->security_level) { case RXRPC_SECURITY_PLAIN: case RXRPC_SECURITY_AUTH: case RXRPC_SECURITY_ENCRYPT: break; default: ret = -EKEYREJECTED; goto error; } ret = rxkad_prime_packet_security(conn, ci); if (ret < 0) goto error_ci; conn->rxkad.cipher = ci; return 0; error_ci: crypto_free_sync_skcipher(ci); error: _leave(" = %d", ret); return ret; } /* * Work out how much data we can put in a packet. 
*/ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp) { struct rxrpc_txbuf *txb; size_t shdr, alloc, limit, part; remain = umin(remain, 65535 - sizeof(struct rxrpc_wire_header)); switch (call->conn->security_level) { default: alloc = umin(remain, RXRPC_JUMBO_DATALEN); return rxrpc_alloc_data_txbuf(call, alloc, 1, gfp); case RXRPC_SECURITY_AUTH: shdr = sizeof(struct rxkad_level1_hdr); break; case RXRPC_SECURITY_ENCRYPT: shdr = sizeof(struct rxkad_level2_hdr); break; } limit = round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN) - shdr; if (remain < limit) { part = remain; alloc = round_up(shdr + part, RXKAD_ALIGN); } else { part = limit; alloc = RXRPC_JUMBO_DATALEN; } txb = rxrpc_alloc_data_txbuf(call, alloc, RXKAD_ALIGN, gfp); if (!txb) return NULL; txb->crypto_header = 0; txb->sec_header = shdr; txb->offset += shdr; txb->space = part; return txb; } /* * prime the encryption state with the invariant parts of a connection's * description */ static int rxkad_prime_packet_security(struct rxrpc_connection *conn, struct crypto_sync_skcipher *ci) { struct skcipher_request *req; struct rxrpc_key_token *token; struct scatterlist sg; struct rxrpc_crypt iv; __be32 *tmpbuf; size_t tmpsize = 4 * sizeof(__be32); _enter(""); if (!conn->key) return 0; tmpbuf = kmalloc(tmpsize, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; req = skcipher_request_alloc(&ci->base, GFP_NOFS); if (!req) { kfree(tmpbuf); return -ENOMEM; } token = conn->key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); tmpbuf[0] = htonl(conn->proto.epoch); tmpbuf[1] = htonl(conn->proto.cid); tmpbuf[2] = 0; tmpbuf[3] = htonl(conn->security_ix); sg_init_one(&sg, tmpbuf, tmpsize); skcipher_request_set_sync_tfm(req, ci); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x); crypto_skcipher_encrypt(req); skcipher_request_free(req); memcpy(&conn->rxkad.csum_iv, tmpbuf + 2, sizeof(conn->rxkad.csum_iv)); kfree(tmpbuf); _leave(" = 0"); return 0; } /* * Allocate and prepare the crypto request on a call. For any particular call, * this is called serially for the packets, so no lock should be necessary. */ static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call) { struct crypto_skcipher *tfm = &call->conn->rxkad.cipher->base; return skcipher_request_alloc(tfm, GFP_NOFS); } /* * Clean up the crypto on a call. 
*/ static void rxkad_free_call_crypto(struct rxrpc_call *call) { } /* * partially encrypt a packet (level 1 security) */ static int rxkad_secure_packet_auth(const struct rxrpc_call *call, struct rxrpc_txbuf *txb, struct skcipher_request *req) { struct rxkad_level1_hdr *hdr = txb->data; struct rxrpc_crypt iv; struct scatterlist sg; size_t pad; u16 check; _enter(""); check = txb->seq ^ call->call_id; hdr->data_size = htonl((u32)check << 16 | txb->len); txb->pkt_len = sizeof(struct rxkad_level1_hdr) + txb->len; pad = txb->pkt_len; pad = RXKAD_ALIGN - pad; pad &= RXKAD_ALIGN - 1; if (pad) { memset(txb->data + txb->offset, 0, pad); txb->pkt_len += pad; } /* start the encryption afresh */ memset(&iv, 0, sizeof(iv)); sg_init_one(&sg, hdr, 8); skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); _leave(" = 0"); return 0; } /* * wholly encrypt a packet (level 2 security) */ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct rxrpc_txbuf *txb, struct skcipher_request *req) { const struct rxrpc_key_token *token; struct rxkad_level2_hdr *rxkhdr = txb->data; struct rxrpc_crypt iv; struct scatterlist sg; size_t content, pad; u16 check; int ret; _enter(""); check = txb->seq ^ call->call_id; rxkhdr->data_size = htonl(txb->len | (u32)check << 16); rxkhdr->checksum = 0; content = sizeof(struct rxkad_level2_hdr) + txb->len; txb->pkt_len = round_up(content, RXKAD_ALIGN); pad = txb->pkt_len - content; if (pad) memset(txb->data + txb->offset, 0, pad); /* encrypt from the session key */ token = call->conn->key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); sg_init_one(&sg, rxkhdr, txb->pkt_len); skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, txb->pkt_len, iv.x); ret = crypto_skcipher_encrypt(req); skcipher_request_zero(req); return ret; } /* * checksum an RxRPC packet header */ static int rxkad_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb) { struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; union { __be32 buf[2]; } crypto __aligned(8); u32 x, y; int ret; _enter("{%d{%x}},{#%u},%u,", call->debug_id, key_serial(call->conn->key), txb->seq, txb->len); if (!call->conn->rxkad.cipher) return 0; ret = key_validate(call->conn->key); if (ret < 0) return ret; req = rxkad_get_call_crypto(call); if (!req) return -ENOMEM; /* continue encrypting from where we left off */ memcpy(&iv, call->conn->rxkad.csum_iv.x, sizeof(iv)); /* calculate the security checksum */ x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT); x |= txb->seq & 0x3fffffff; crypto.buf[0] = htonl(call->call_id); crypto.buf[1] = htonl(x); sg_init_one(&sg, crypto.buf, 8); skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x); crypto_skcipher_encrypt(req); skcipher_request_zero(req); y = ntohl(crypto.buf[1]); y = (y >> 16) & 0xffff; if (y == 0) y = 1; /* zero checksums are not permitted */ txb->cksum = htons(y); switch (call->conn->security_level) { case RXRPC_SECURITY_PLAIN: txb->pkt_len = txb->len; ret = 0; break; case RXRPC_SECURITY_AUTH: ret = rxkad_secure_packet_auth(call, txb, req); if (txb->alloc_size == RXRPC_JUMBO_DATALEN) txb->jumboable = true; break; case 
RXRPC_SECURITY_ENCRYPT: ret = rxkad_secure_packet_encrypt(call, txb, req); if (txb->alloc_size == RXRPC_JUMBO_DATALEN) txb->jumboable = true; break; default: ret = -EPERM; break; } /* Clear excess space in the packet */ if (txb->pkt_len < txb->alloc_size) { size_t gap = txb->alloc_size - txb->pkt_len; void *p = txb->data; memset(p + txb->pkt_len, 0, gap); } skcipher_request_free(req); _leave(" = %d [set %x]", ret, y); return ret; } /* * decrypt partial encryption on a packet (level 1 security) */ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, rxrpc_seq_t seq, struct skcipher_request *req) { struct rxkad_level1_hdr sechdr; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_crypt iv; struct scatterlist sg[16]; u32 data_size, buf; u16 check; int ret; _enter(""); if (sp->len < 8) return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, rxkad_abort_1_short_header); /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. */ sg_init_table(sg, ARRAY_SIZE(sg)); ret = skb_to_sgvec(skb, sg, sp->offset, 8); if (unlikely(ret < 0)) return ret; /* start the decryption afresh */ memset(&iv, 0, sizeof(iv)); skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, 8, iv.x); crypto_skcipher_decrypt(req); skcipher_request_zero(req); /* Extract the decrypted packet length */ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) return rxrpc_abort_eproto(call, skb, RXKADDATALEN, rxkad_abort_1_short_encdata); sp->offset += sizeof(sechdr); sp->len -= sizeof(sechdr); buf = ntohl(sechdr.data_size); data_size = buf & 0xffff; check = buf >> 16; check ^= seq ^ call->call_id; check &= 0xffff; if (check != 0) return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, rxkad_abort_1_short_check); if (data_size > sp->len) return rxrpc_abort_eproto(call, skb, RXKADDATALEN, rxkad_abort_1_short_data); sp->len = data_size; _leave(" = 0 [dlen=%x]", data_size); return 0; } /* * wholly decrypt a packet (level 2 security) */ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, rxrpc_seq_t seq, struct skcipher_request *req) { const struct rxrpc_key_token *token; struct rxkad_level2_hdr sechdr; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_crypt iv; struct scatterlist _sg[4], *sg; u32 data_size, buf; u16 check; int nsg, ret; _enter(",{%d}", sp->len); if (sp->len < 8) return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, rxkad_abort_2_short_header); /* Decrypt the skbuff in-place. TODO: We really want to decrypt * directly into the target buffer. 
*/ sg = _sg; nsg = skb_shinfo(skb)->nr_frags + 1; if (nsg <= 4) { nsg = 4; } else { sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); if (!sg) return -ENOMEM; } sg_init_table(sg, nsg); ret = skb_to_sgvec(skb, sg, sp->offset, sp->len); if (unlikely(ret < 0)) { if (sg != _sg) kfree(sg); return ret; } /* decrypt from the session key */ token = call->conn->key->payload.data[0]; memcpy(&iv, token->kad->session_key, sizeof(iv)); skcipher_request_set_sync_tfm(req, call->conn->rxkad.cipher); skcipher_request_set_callback(req, 0, NULL, NULL); skcipher_request_set_crypt(req, sg, sg, sp->len, iv.x); crypto_skcipher_decrypt(req); skcipher_request_zero(req); if (sg != _sg) kfree(sg); /* Extract the decrypted packet length */ if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) return rxrpc_abort_eproto(call, skb, RXKADDATALEN, rxkad_abort_2_short_len); sp->offset += sizeof(sechdr); sp->len -= sizeof(sechdr); buf = ntohl(sechdr.data_size); data_size = buf & 0xffff; check = buf >> 16; check ^= seq ^ call->call_id; check &= 0xffff; if (check != 0) return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, rxkad_abort_2_short_check); if (data_size > sp->len) return rxrpc_abort_eproto(call, skb, RXKADDATALEN, rxkad_abort_2_short_data); sp->len = data_size; _leave(" = 0 [dlen=%x]", data_size); return 0; } /* * Verify the security on a received packet and the subpackets therein. */ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct skcipher_request *req; struct rxrpc_crypt iv; struct scatterlist sg; union { __be32 buf[2]; } crypto __aligned(8); rxrpc_seq_t seq = sp->hdr.seq; int ret; u16 cksum; u32 x, y; _enter("{%d{%x}},{#%u}", call->debug_id, key_serial(call->conn->key), seq); |