/* SPDX-License-Identifier: GPL-2.0 */
/*
 * sysfs.h - definitions for the device driver filesystem
 *
 * Copyright (c) 2001,2002 Patrick Mochel
 * Copyright (c) 2004 Silicon Graphics, Inc.
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
 *
 * Please see Documentation/filesystems/sysfs.rst for more information.
*/ #ifndef _SYSFS_H_ #define _SYSFS_H_ #include <linux/kernfs.h> #include <linux/compiler.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/kobject_ns.h> #include <linux/stat.h> #include <linux/atomic.h> struct kobject; struct module; struct bin_attribute; enum kobj_ns_type; struct attribute { const char *name; umode_t mode; #ifdef CONFIG_DEBUG_LOCK_ALLOC bool ignore_lockdep:1; struct lock_class_key *key; struct lock_class_key skey; #endif }; /** * sysfs_attr_init - initialize a dynamically allocated sysfs attribute * @attr: struct attribute to initialize * * Initialize a dynamically allocated struct attribute so we can * make lockdep happy. This is a new requirement for attributes * and initially this is only needed when lockdep is enabled. * Lockdep gives a nice error when your attribute is added to * sysfs if you don't have this. */ #ifdef CONFIG_DEBUG_LOCK_ALLOC #define sysfs_attr_init(attr) \ do { \ static struct lock_class_key __key; \ \ (attr)->key = &__key; \ } while (0) #else #define sysfs_attr_init(attr) do {} while (0) #endif /** * struct attribute_group - data structure used to declare an attribute group. * @name: Optional: Attribute group name * If specified, the attribute group will be created in a * new subdirectory with this name. Additionally when a * group is named, @is_visible and @is_bin_visible may * return SYSFS_GROUP_INVISIBLE to control visibility of * the directory itself. * @is_visible: Optional: Function to return permissions associated with an * attribute of the group. Will be called repeatedly for * each non-binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC are accepted. Must * return 0 if an attribute is not visible. The returned * value will replace static permissions defined in struct * attribute. Use SYSFS_GROUP_VISIBLE() when assigning this * callback to specify separate _group_visible() and * _attr_visible() handlers. * @is_bin_visible: * Optional: Function to return permissions associated with a * binary attribute of the group. Will be called repeatedly * for each binary attribute in the group. Only read/write * permissions as well as SYSFS_PREALLOC (and the * visibility flags for named groups) are accepted. Must * return 0 if a binary attribute is not visible. The * returned value will replace static permissions defined * in struct bin_attribute. If @is_visible is not set, Use * SYSFS_GROUP_VISIBLE() when assigning this callback to * specify separate _group_visible() and _attr_visible() * handlers. * @bin_size: * Optional: Function to return the size of a binary attribute * of the group. Will be called repeatedly for each binary * attribute in the group. Overwrites the size field embedded * inside the attribute itself. * @attrs: Pointer to NULL terminated list of attributes. * @bin_attrs: Pointer to NULL terminated list of binary attributes. * Either attrs or bin_attrs or both must be provided. 
 */
struct attribute_group {
	const char		*name;
	umode_t			(*is_visible)(struct kobject *,
					      struct attribute *, int);
	umode_t			(*is_bin_visible)(struct kobject *,
						  const struct bin_attribute *, int);
	size_t			(*bin_size)(struct kobject *,
					    const struct bin_attribute *, int);
	struct attribute	**attrs;
	union {
		const struct bin_attribute	*const *bin_attrs;
		const struct bin_attribute	*const *bin_attrs_new;
	};
};

#define SYSFS_PREALLOC		010000
#define SYSFS_GROUP_INVISIBLE	020000

/*
 * DEFINE_SYSFS_GROUP_VISIBLE(name):
 *	A helper macro to pair with the assignment of ".is_visible =
 *	SYSFS_GROUP_VISIBLE(name)", that arranges for the directory
 *	associated with a named attribute_group to optionally be hidden.
 *	This allows for static declaration of attribute_groups, and the
 *	simplification of attribute visibility lifetime that implies,
 *	without polluting sysfs with empty attribute directories.
 * Ex.
 *
 *	static umode_t example_attr_visible(struct kobject *kobj,
 *			struct attribute *attr, int n)
 *	{
 *		if (example_attr_condition)
 *			return 0;
 *		else if (ro_attr_condition)
 *			return 0444;
 *		return attr->mode;
 *	}
 *
 *	static bool example_group_visible(struct kobject *kobj)
 *	{
 *		if (example_group_condition)
 *			return false;
 *		return true;
 *	}
 *
 *	DEFINE_SYSFS_GROUP_VISIBLE(example);
 *
 *	static struct attribute_group example_group = {
 *		.name = "example",
 *		.is_visible = SYSFS_GROUP_VISIBLE(example),
 *		.attrs = &example_attrs,
 *	};
 *
 * Note that it expects <name>_attr_visible and <name>_group_visible to
 * be defined. For cases where individual attributes do not need
 * separate visibility consideration, only entire group visibility at
 * once, see DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE().
 */
#define DEFINE_SYSFS_GROUP_VISIBLE(name)				\
	static inline umode_t sysfs_group_visible_##name(		\
		struct kobject *kobj, struct attribute *attr, int n)	\
	{								\
		if (n == 0 && !name##_group_visible(kobj))		\
			return SYSFS_GROUP_INVISIBLE;			\
		return name##_attr_visible(kobj, attr, n);		\
	}

/*
 * DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name):
 *	A helper macro to pair with SYSFS_GROUP_VISIBLE() that, like
 *	DEFINE_SYSFS_GROUP_VISIBLE(), controls group visibility, but does
 *	not require the implementation of a per-attribute visibility
 *	callback.
 * Ex.
 *
 *	static bool example_group_visible(struct kobject *kobj)
 *	{
 *		if (example_group_condition)
 *			return false;
 *		return true;
 *	}
 *
 *	DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(example);
 *
 *	static struct attribute_group example_group = {
 *		.name = "example",
 *		.is_visible = SYSFS_GROUP_VISIBLE(example),
 *		.attrs = &example_attrs,
 *	};
 */
#define DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(name)				\
	static inline umode_t sysfs_group_visible_##name(		\
		struct kobject *kobj, struct attribute *a, int n)	\
	{								\
		if (n == 0 && !name##_group_visible(kobj))		\
			return SYSFS_GROUP_INVISIBLE;			\
		return a->mode;						\
	}

/*
 * Same as DEFINE_SYSFS_GROUP_VISIBLE, but for groups with only binary
 * attributes.
If an attribute_group defines both text and binary * attributes, the group visibility is determined by the function * specified to is_visible() not is_bin_visible() */ #define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, const struct bin_attribute *attr, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return name##_attr_visible(kobj, attr, n); \ } #define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ static inline umode_t sysfs_group_visible_##name( \ struct kobject *kobj, const struct bin_attribute *a, int n) \ { \ if (n == 0 && !name##_group_visible(kobj)) \ return SYSFS_GROUP_INVISIBLE; \ return a->mode; \ } #define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn /* * Use these macros to make defining attributes easier. * See include/linux/device.h for examples.. */ #define __ATTR(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _show, \ .store = _store, \ } #define __ATTR_PREALLOC(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), \ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\ .show = _show, \ .store = _store, \ } #define __ATTR_RO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0444 }, \ .show = _name##_show, \ } #define __ATTR_RO_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ } #define __ATTR_RW_MODE(_name, _mode) { \ .attr = { .name = __stringify(_name), \ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ .show = _name##_show, \ .store = _name##_store, \ } #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ } #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) #define __ATTR_NULL { .attr = { .name = NULL } } #ifdef CONFIG_DEBUG_LOCK_ALLOC #define __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) { \ .attr = {.name = __stringify(_name), .mode = _mode, \ .ignore_lockdep = true }, \ .show = _show, \ .store = _store, \ } #else #define __ATTR_IGNORE_LOCKDEP __ATTR #endif #define __ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group *_name##_groups[] = { \ &_name##_group, \ NULL, \ } #define ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .attrs = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) #define BIN_ATTRIBUTE_GROUPS(_name) \ static const struct attribute_group _name##_group = { \ .bin_attrs_new = _name##_attrs, \ }; \ __ATTRIBUTE_GROUPS(_name) struct file; struct vm_area_struct; struct address_space; struct bin_attribute { struct attribute attr; size_t size; void *private; struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write_new)(struct file *, struct kobject *, const struct bin_attribute *, char *, loff_t, size_t); loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr, struct vm_area_struct *vma); }; /** * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute * @attr: struct bin_attribute to initialize * 
* Initialize a dynamically allocated struct bin_attribute so we * can make lockdep happy. This is a new requirement for * attributes and initially this is only needed when lockdep is * enabled. Lockdep gives a nice error when your attribute is * added to sysfs if you don't have this. */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) /* macros to create static binary attributes easier */ #define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .read = _read, \ .write = _write, \ .size = _size, \ } #define __BIN_ATTR_RO(_name, _size) \ __BIN_ATTR(_name, 0444, _name##_read, NULL, _size) #define __BIN_ATTR_WO(_name, _size) \ __BIN_ATTR(_name, 0200, NULL, _name##_write, _size) #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) #define __BIN_ATTR_NULL __ATTR_NULL #define BIN_ATTR(_name, _mode, _read, _write, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ _write, _size) #define BIN_ATTR_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) #define BIN_ATTR_WO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) #define __BIN_ATTR_ADMIN_RO(_name, _size) \ __BIN_ATTR(_name, 0400, _name##_read, NULL, _size) #define __BIN_ATTR_ADMIN_RW(_name, _size) \ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size) #define BIN_ATTR_ADMIN_RO(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) #define __BIN_ATTR_SIMPLE_RO(_name, _mode) \ __BIN_ATTR(_name, _mode, sysfs_bin_attr_simple_read, NULL, 0) #define BIN_ATTR_SIMPLE_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) #define BIN_ATTR_SIMPLE_ADMIN_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0400) struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); }; #ifdef CONFIG_SYSFS int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); void sysfs_remove_dir(struct kobject *kobj); int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns); int __must_check sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns); int __must_check sysfs_create_mount_point(struct kobject *parent_kobj, const char *name); void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name); int __must_check sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr); void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check 
sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr); int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name); int __must_check sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name); void sysfs_remove_link(struct kobject *kobj, const char *name); int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name, const void *new_ns); void sysfs_delete_link(struct kobject *dir, struct kobject *targ, const char *name); int __must_check sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); int __must_check sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups); int __must_check sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups); int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups); int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group); void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group); int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp); void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp); int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name); void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name); int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, const char *symlink_name); void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); int __must_check sysfs_init(void); static inline void sysfs_enable_ns(struct kernfs_node *kn) { return kernfs_enable_ns(kn); } int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid); int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid); int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid); int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid); __printf(2, 3) int sysfs_emit(char *buf, const char *fmt, ...); __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count); #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) { return 0; } static inline void sysfs_remove_dir(struct kobject *kobj) { } static inline int sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name, const void *new_ns) { return 0; } static inline int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj, const void *new_ns) { return 0; } static inline int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name) { return 0; } static inline void sysfs_remove_mount_point(struct kobject *parent_kobj, const char 
*name) { } static inline int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { return 0; } static inline int sysfs_create_files(struct kobject *kobj, const struct attribute * const *attr) { return 0; } static inline int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode) { return 0; } static inline struct kernfs_node * sysfs_break_active_protection(struct kobject *kobj, const struct attribute *attr) { return NULL; } static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) { } static inline void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns) { } static inline bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr) { return false; } static inline void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr) { } static inline int sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { return 0; } static inline void sysfs_remove_bin_file(struct kobject *kobj, const struct bin_attribute *attr) { } static inline int sysfs_create_link(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline int sysfs_create_link_nowarn(struct kobject *kobj, struct kobject *target, const char *name) { return 0; } static inline void sysfs_remove_link(struct kobject *kobj, const char *name) { } static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t, const char *old_name, const char *new_name, const void *ns) { return 0; } static inline void sysfs_delete_link(struct kobject *k, struct kobject *t, const char *name) { } static inline int sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline int sysfs_create_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_groups(struct kobject *kobj, const struct attribute_group **groups) { return 0; } static inline int sysfs_update_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline void sysfs_remove_groups(struct kobject *kobj, const struct attribute_group **groups) { } static inline int sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr, const char *group) { return 0; } static inline void sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr, const char *group) { } static inline int sysfs_merge_group(struct kobject *kobj, const struct attribute_group *grp) { return 0; } static inline void sysfs_unmerge_group(struct kobject *kobj, const struct attribute_group *grp) { } static inline int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name, struct kobject *target, const char *link_name) { return 0; } static inline void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name, const char *link_name) { } static inline int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, struct kobject *target_kobj, const char *target_name, const char *symlink_name) { return 0; } static inline void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr) { } static inline int __must_check sysfs_init(void) { return 0; } static inline void sysfs_enable_ns(struct kernfs_node *kn) { } static inline int sysfs_file_change_owner(struct kobject *kobj, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } 
static inline int sysfs_link_change_owner(struct kobject *kobj, struct kobject *targ, const char *name, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_groups_change_owner(struct kobject *kobj, const struct attribute_group **groups, kuid_t kuid, kgid_t kgid) { return 0; } static inline int sysfs_group_change_owner(struct kobject *kobj, const struct attribute_group *groups, kuid_t kuid, kgid_t kgid) { return 0; } __printf(2, 3) static inline int sysfs_emit(char *buf, const char *fmt, ...) { return 0; } __printf(3, 4) static inline int sysfs_emit_at(char *buf, int at, const char *fmt, ...) { return 0; } static inline ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return 0; } #endif /* CONFIG_SYSFS */ static inline int __must_check sysfs_create_file(struct kobject *kobj, const struct attribute *attr) { return sysfs_create_file_ns(kobj, attr, NULL); } static inline void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr) { sysfs_remove_file_ns(kobj, attr, NULL); } static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target, const char *old_name, const char *new_name) { return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL); } static inline void sysfs_notify_dirent(struct kernfs_node *kn) { kernfs_notify(kn); } static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent, const char *name) { return kernfs_find_and_get(parent, name); } static inline struct kernfs_node *sysfs_get(struct kernfs_node *kn) { kernfs_get(kn); return kn; } static inline void sysfs_put(struct kernfs_node *kn) { kernfs_put(kn); } #endif /* _SYSFS_H_ */ |
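For orientation, here is a minimal sketch (not part of sysfs.h) of how the attribute macros above are typically used by a kobject-based driver; the names demo/example are invented for illustration and the pattern follows samples/kobject/kobject-example.c:

/* Illustrative sketch only -- assumes <linux/kobject.h> for kobj_attribute. */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	/* sysfs_emit() is the preferred way to format a single-value file. */
	return sysfs_emit(buf, "%d\n", 42);
}

/* __ATTR_RO() wires mode 0444 and the <name>_show callback together. */
static struct kobj_attribute example_attr = __ATTR_RO(example);

static struct attribute *demo_attrs[] = {
	&example_attr.attr,
	NULL,			/* attrs lists must be NULL terminated */
};
ATTRIBUTE_GROUPS(demo);		/* defines demo_group and demo_groups[] */

The resulting demo_groups array would normally be hooked up through a kobj_type's ->default_groups, or demo_group registered explicitly with sysfs_create_group().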
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TRACE_SYSCALL_H
#define _TRACE_SYSCALL_H

#include <linux/tracepoint.h>
#include <linux/unistd.h>
#include <linux/trace_events.h>
#include <linux/thread_info.h>

#include <asm/ptrace.h>

/*
 * A syscall entry in the ftrace syscalls array.
 *
 * @name: name of the syscall
 * @syscall_nr: number of the syscall
 * @nb_args: number of parameters it takes
 * @types: list of types as strings
 * @args: list of args as strings (args[i] matches types[i])
 * @enter_fields: list of fields for syscall_enter trace event
 * @enter_event: associated syscall_enter trace event
 * @exit_event: associated syscall_exit trace event
 */
struct syscall_metadata {
	const char	*name;
	int		syscall_nr;
	int		nb_args;
	const char	**types;
	const char	**args;
	struct list_head enter_fields;

	struct trace_event_call *enter_event;
	struct trace_event_call *exit_event;
};

#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
static inline void syscall_tracepoint_update(struct task_struct *p)
{
	if (test_syscall_work(SYSCALL_TRACEPOINT))
		set_task_syscall_work(p, SYSCALL_TRACEPOINT);
	else
		clear_task_syscall_work(p, SYSCALL_TRACEPOINT);
}
#else
static inline void syscall_tracepoint_update(struct task_struct *p)
{
}
#endif

#endif /* _TRACE_SYSCALL_H */
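As a rough illustration of how such a record is populated (hand-written here, not the output of the SYSCALL_METADATA()/SYSCALL_DEFINEx() machinery in <linux/syscalls.h>), a hypothetical two-argument syscall might be described as:

/* Hypothetical example; names and field values are illustrative only. */
static const char *demo_types[] = { "unsigned int", "int" };
static const char *demo_args[]  = { "fd", "flags" };

static struct syscall_metadata demo_meta = {
	.name		= "sys_demo",
	.syscall_nr	= -1,	/* resolved against the syscall table at boot */
	.nb_args	= 2,
	.types		= demo_types,
	.args		= demo_args,
	/*
	 * enter_fields, enter_event and exit_event are set up by the
	 * tracing core when the syscall trace events are registered.
	 */
};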
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised
29.12.95, Stephen Tweedie * * Rewritten to use page cache, (C) 1998 Stephen Tweedie */ #include <linux/mm.h> #include <linux/gfp.h> #include <linux/kernel_stat.h> #include <linux/mempolicy.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/init.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/migrate.h> #include <linux/vmalloc.h> #include <linux/huge_mm.h> #include <linux/shmem_fs.h> #include "internal.h" #include "swap.h" /* * swapper_space is a fiction, retained to simplify the path through * vmscan's shrink_folio_list. */ static const struct address_space_operations swap_aops = { .dirty_folio = noop_dirty_folio, #ifdef CONFIG_MIGRATION .migrate_folio = migrate_folio, #endif }; struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly; static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly; static bool enable_vma_readahead __read_mostly = true; #define SWAP_RA_ORDER_CEILING 5 #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK #define SWAP_RA_WIN_MASK (~PAGE_MASK & ~SWAP_RA_HITS_MASK) #define SWAP_RA_HITS(v) ((v) & SWAP_RA_HITS_MASK) #define SWAP_RA_WIN(v) (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT) #define SWAP_RA_ADDR(v) ((v) & PAGE_MASK) #define SWAP_RA_VAL(addr, win, hits) \ (((addr) & PAGE_MASK) | \ (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \ ((hits) & SWAP_RA_HITS_MASK)) /* Initial readahead hits is 4 to start up with a small window */ #define GET_SWAP_RA_VAL(vma) \ (atomic_long_read(&(vma)->swap_readahead_info) ? : 4) static atomic_t swapin_readahead_hits = ATOMIC_INIT(4); void show_swap_cache_info(void) { printk("%lu pages in swap cache\n", total_swapcache_pages()); printk("Free swap = %ldkB\n", K(get_nr_swap_pages())); printk("Total swap = %lukB\n", K(total_swap_pages)); } void *get_shadow_from_swap_cache(swp_entry_t entry) { struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swap_cache_index(entry); void *shadow; shadow = xa_load(&address_space->i_pages, idx); if (xa_is_value(shadow)) return shadow; return NULL; } /* * add_to_swap_cache resembles filemap_add_folio on swapper_space, * but sets SwapCache flag and 'swap' instead of mapping and index. 
*/ int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp) { struct address_space *address_space = swap_address_space(entry); pgoff_t idx = swap_cache_index(entry); XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio)); unsigned long i, nr = folio_nr_pages(folio); void *old; xas_set_update(&xas, workingset_update_node); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); folio_ref_add(folio, nr); folio_set_swapcache(folio); folio->swap = entry; do { xas_lock_irq(&xas); xas_create_range(&xas); if (xas_error(&xas)) goto unlock; for (i = 0; i < nr; i++) { VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio); if (shadowp) { old = xas_load(&xas); if (xa_is_value(old)) *shadowp = old; } xas_store(&xas, folio); xas_next(&xas); } address_space->nrpages += nr; __node_stat_mod_folio(folio, NR_FILE_PAGES, nr); __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr); unlock: xas_unlock_irq(&xas); } while (xas_nomem(&xas, gfp)); if (!xas_error(&xas)) return 0; folio_clear_swapcache(folio); folio_ref_sub(folio, nr); return xas_error(&xas); } /* * This must be called only on folios that have * been verified to be in the swap cache. */ void __delete_from_swap_cache(struct folio *folio, swp_entry_t entry, void *shadow) { struct address_space *address_space = swap_address_space(entry); int i; long nr = folio_nr_pages(folio); pgoff_t idx = swap_cache_index(entry); XA_STATE(xas, &address_space->i_pages, idx); xas_set_update(&xas, workingset_update_node); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio); VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); for (i = 0; i < nr; i++) { void *entry = xas_store(&xas, shadow); VM_BUG_ON_PAGE(entry != folio, entry); xas_next(&xas); } folio->swap.val = 0; folio_clear_swapcache(folio); address_space->nrpages -= nr; __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr); __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr); } /* * This must be called only on folios that have * been verified to be in the swap cache and locked. * It will never put the folio into the free list, * the caller has a reference on the folio. */ void delete_from_swap_cache(struct folio *folio) { swp_entry_t entry = folio->swap; struct address_space *address_space = swap_address_space(entry); xa_lock_irq(&address_space->i_pages); __delete_from_swap_cache(folio, entry, NULL); xa_unlock_irq(&address_space->i_pages); put_swap_folio(folio, entry); folio_ref_sub(folio, folio_nr_pages(folio)); } void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end) { unsigned long curr = begin; void *old; for (;;) { swp_entry_t entry = swp_entry(type, curr); unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK; struct address_space *address_space = swap_address_space(entry); XA_STATE(xas, &address_space->i_pages, index); xas_set_update(&xas, workingset_update_node); xa_lock_irq(&address_space->i_pages); xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) { if (!xa_is_value(old)) continue; xas_store(&xas, NULL); } xa_unlock_irq(&address_space->i_pages); /* search the next swapcache until we meet end */ curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES); if (curr > end) break; } } /* * If we are the only user, then try to free up the swap cache. 
* * Its ok to check the swapcache flag without the folio lock * here because we are going to recheck again inside * folio_free_swap() _with_ the lock. * - Marcelo */ void free_swap_cache(struct folio *folio) { if (folio_test_swapcache(folio) && !folio_mapped(folio) && folio_trylock(folio)) { folio_free_swap(folio); folio_unlock(folio); } } /* * Freeing a folio and also freeing any swap cache associated with * this folio if it is the last user. */ void free_folio_and_swap_cache(struct folio *folio) { free_swap_cache(folio); if (!is_huge_zero_folio(folio)) folio_put(folio); } /* * Passed an array of pages, drop them all from swapcache and then release * them. They are removed from the LRU and freed if this is their last use. */ void free_pages_and_swap_cache(struct encoded_page **pages, int nr) { struct folio_batch folios; unsigned int refs[PAGEVEC_SIZE]; folio_batch_init(&folios); for (int i = 0; i < nr; i++) { struct folio *folio = page_folio(encoded_page_ptr(pages[i])); free_swap_cache(folio); refs[folios.nr] = 1; if (unlikely(encoded_page_flags(pages[i]) & ENCODED_PAGE_BIT_NR_PAGES_NEXT)) refs[folios.nr] = encoded_nr_pages(pages[++i]); if (folio_batch_add(&folios, folio) == 0) folios_put_refs(&folios, refs); } if (folios.nr) folios_put_refs(&folios, refs); } static inline bool swap_use_vma_readahead(void) { return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap); } /* * Lookup a swap entry in the swap cache. A found folio will be returned * unlocked and with its refcount incremented - we rely on the kernel * lock getting page table operations atomic even if we drop the folio * lock before returning. * * Caller must lock the swap device or hold a reference to keep it valid. */ struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr) { struct folio *folio; folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry)); if (!IS_ERR(folio)) { bool vma_ra = swap_use_vma_readahead(); bool readahead; /* * At the moment, we don't support PG_readahead for anon THP * so let's bail out rather than confusing the readahead stat. */ if (unlikely(folio_test_large(folio))) return folio; readahead = folio_test_clear_readahead(folio); if (vma && vma_ra) { unsigned long ra_val; int win, hits; ra_val = GET_SWAP_RA_VAL(vma); win = SWAP_RA_WIN(ra_val); hits = SWAP_RA_HITS(ra_val); if (readahead) hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(addr, win, hits)); } if (readahead) { count_vm_event(SWAP_RA_HIT); if (!vma || !vma_ra) atomic_inc(&swapin_readahead_hits); } } else { folio = NULL; } return folio; } /** * filemap_get_incore_folio - Find and get a folio from the page or swap caches. * @mapping: The address_space to search. * @index: The page cache index. * * This differs from filemap_get_folio() in that it will also look for the * folio in the swap cache. * * Return: The found folio or %NULL. */ struct folio *filemap_get_incore_folio(struct address_space *mapping, pgoff_t index) { swp_entry_t swp; struct swap_info_struct *si; struct folio *folio = filemap_get_entry(mapping, index); if (!folio) return ERR_PTR(-ENOENT); if (!xa_is_value(folio)) return folio; if (!shmem_mapping(mapping)) return ERR_PTR(-ENOENT); swp = radix_to_swp_entry(folio); /* There might be swapin error entries in shmem mapping. 
*/ if (non_swap_entry(swp)) return ERR_PTR(-ENOENT); /* Prevent swapoff from happening to us */ si = get_swap_device(swp); if (!si) return ERR_PTR(-ENOENT); index = swap_cache_index(swp); folio = filemap_get_folio(swap_address_space(swp), index); put_swap_device(si); return folio; } struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, bool skip_if_exists) { struct swap_info_struct *si = swp_swap_info(entry); struct folio *folio; struct folio *new_folio = NULL; struct folio *result = NULL; void *shadow = NULL; *new_page_allocated = false; for (;;) { int err; /* * First check the swap cache. Since this is normally * called after swap_cache_get_folio() failed, re-calling * that would confuse statistics. */ folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry)); if (!IS_ERR(folio)) goto got_folio; /* * Just skip read ahead for unused swap slot. */ if (!swap_entry_swapped(si, entry)) goto put_and_return; /* * Get a new folio to read into from swap. Allocate it now if * new_folio not exist, before marking swap_map SWAP_HAS_CACHE, * when -EEXIST will cause any racers to loop around until we * add it to cache. */ if (!new_folio) { new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); if (!new_folio) goto put_and_return; } /* * Swap entry may have been freed since our caller observed it. */ err = swapcache_prepare(entry, 1); if (!err) break; else if (err != -EEXIST) goto put_and_return; /* * Protect against a recursive call to __read_swap_cache_async() * on the same entry waiting forever here because SWAP_HAS_CACHE * is set but the folio is not the swap cache yet. This can * happen today if mem_cgroup_swapin_charge_folio() below * triggers reclaim through zswap, which may call * __read_swap_cache_async() in the writeback path. */ if (skip_if_exists) goto put_and_return; /* * We might race against __delete_from_swap_cache(), and * stumble across a swap_map entry whose SWAP_HAS_CACHE * has not yet been cleared. Or race against another * __read_swap_cache_async(), which has set SWAP_HAS_CACHE * in swap_map, but not yet added its folio to swap cache. */ schedule_timeout_uninterruptible(1); } /* * The swap entry is ours to swap in. Prepare the new folio. */ __folio_set_locked(new_folio); __folio_set_swapbacked(new_folio); if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry)) goto fail_unlock; /* May fail (-ENOMEM) if XArray node allocation failed. */ if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; memcg1_swapin(entry, 1); if (shadow) workingset_refault(new_folio, shadow); /* Caller will initiate read into locked new_folio */ folio_add_lru(new_folio); *new_page_allocated = true; folio = new_folio; got_folio: result = folio; goto put_and_return; fail_unlock: put_swap_folio(new_folio, entry); folio_unlock(new_folio); put_and_return: if (!(*new_page_allocated) && new_folio) folio_put(new_folio); return result; } /* * Locate a page of swap in physical memory, reserving swap cache space * and reading the disk if it is not already cached. * A failure return means that either the page allocation failed or that * the swap entry is no longer in use. * * get/put_swap_device() aren't needed to call this function, because * __read_swap_cache_async() call them and swap_read_folio() holds the * swap cache folio lock. 
*/ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug) { struct swap_info_struct *si; bool page_allocated; struct mempolicy *mpol; pgoff_t ilx; struct folio *folio; si = get_swap_device(entry); if (!si) return NULL; mpol = get_vma_policy(vma, addr, 0, &ilx); folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, &page_allocated, false); mpol_cond_put(mpol); if (page_allocated) swap_read_folio(folio, plug); put_swap_device(si); return folio; } static unsigned int __swapin_nr_pages(unsigned long prev_offset, unsigned long offset, int hits, int max_pages, int prev_win) { unsigned int pages, last_ra; /* * This heuristic has been found to work well on both sequential and * random loads, swapping to hard disk or to SSD: please don't ask * what the "+ 2" means, it just happens to work well, that's all. */ pages = hits + 2; if (pages == 2) { /* * We can have no readahead hits to judge by: but must not get * stuck here forever, so check for an adjacent offset instead * (and don't even bother to check whether swap type is same). */ if (offset != prev_offset + 1 && offset != prev_offset - 1) pages = 1; } else { unsigned int roundup = 4; while (roundup < pages) roundup <<= 1; pages = roundup; } if (pages > max_pages) pages = max_pages; /* Don't shrink readahead too fast */ last_ra = prev_win / 2; if (pages < last_ra) pages = last_ra; return pages; } static unsigned long swapin_nr_pages(unsigned long offset) { static unsigned long prev_offset; unsigned int hits, pages, max_pages; static atomic_t last_readahead_pages; max_pages = 1 << READ_ONCE(page_cluster); if (max_pages <= 1) return 1; hits = atomic_xchg(&swapin_readahead_hits, 0); pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits, max_pages, atomic_read(&last_readahead_pages)); if (!hits) WRITE_ONCE(prev_offset, offset); atomic_set(&last_readahead_pages, pages); return pages; } /** * swap_cluster_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory * @gfp_mask: memory allocation flags * @mpol: NUMA memory allocation policy to be applied * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE * * Returns the struct folio for entry and addr, after queueing swapin. * * Primitive swap readahead code. We simply read an aligned block of * (1 << page_cluster) entries in the swap area. This method is chosen * because it doesn't cost us any seek time. We also make sure to queue * the 'original' request together with the readahead ones... * * Note: it is intentional that the same NUMA policy and interleave index * are used for every page of the readahead: neighbouring pages on swap * are fairly likely to have been swapped out from the same node. */ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) { struct folio *folio; unsigned long entry_offset = swp_offset(entry); unsigned long offset = entry_offset; unsigned long start_offset, end_offset; unsigned long mask; struct swap_info_struct *si = swp_swap_info(entry); struct blk_plug plug; struct swap_iocb *splug = NULL; bool page_allocated; mask = swapin_nr_pages(offset) - 1; if (!mask) goto skip; /* Read a page_cluster sized and aligned cluster around offset. */ start_offset = offset & ~mask; end_offset = offset | mask; if (!start_offset) /* First page is swap header. 
*/ start_offset++; if (end_offset >= si->max) end_offset = si->max - 1; blk_start_plug(&plug); for (offset = start_offset; offset <= end_offset ; offset++) { /* Ok, do the async read-ahead now */ folio = __read_swap_cache_async( swp_entry(swp_type(entry), offset), gfp_mask, mpol, ilx, &page_allocated, false); if (!folio) continue; if (page_allocated) { swap_read_folio(folio, &splug); if (offset != entry_offset) { folio_set_readahead(folio); count_vm_event(SWAP_RA); } } folio_put(folio); } blk_finish_plug(&plug); swap_read_unplug(splug); lru_add_drain(); /* Push any new pages onto the LRU now */ skip: /* The page was likely read above, so no need for plugging here */ folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, &page_allocated, false); if (unlikely(page_allocated)) swap_read_folio(folio, NULL); return folio; } int init_swap_address_space(unsigned int type, unsigned long nr_pages) { struct address_space *spaces, *space; unsigned int i, nr; nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES); spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL); if (!spaces) return -ENOMEM; for (i = 0; i < nr; i++) { space = spaces + i; xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ); atomic_set(&space->i_mmap_writable, 0); space->a_ops = &swap_aops; /* swap cache doesn't use writeback related tags */ mapping_set_no_writeback_tags(space); } nr_swapper_spaces[type] = nr; swapper_spaces[type] = spaces; return 0; } void exit_swap_address_space(unsigned int type) { int i; struct address_space *spaces = swapper_spaces[type]; for (i = 0; i < nr_swapper_spaces[type]; i++) VM_WARN_ON_ONCE(!mapping_empty(&spaces[i])); kvfree(spaces); nr_swapper_spaces[type] = 0; swapper_spaces[type] = NULL; } static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start, unsigned long *end) { struct vm_area_struct *vma = vmf->vma; unsigned long ra_val; unsigned long faddr, prev_faddr, left, right; unsigned int max_win, hits, prev_win, win; max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING); if (max_win == 1) return 1; faddr = vmf->address; ra_val = GET_SWAP_RA_VAL(vma); prev_faddr = SWAP_RA_ADDR(ra_val); prev_win = SWAP_RA_WIN(ra_val); hits = SWAP_RA_HITS(ra_val); win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits, max_win, prev_win); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0)); if (win == 1) return 1; if (faddr == prev_faddr + PAGE_SIZE) left = faddr; else if (prev_faddr == faddr + PAGE_SIZE) left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE; else left = faddr - (((win - 1) / 2) << PAGE_SHIFT); right = left + (win << PAGE_SHIFT); if ((long)left < 0) left = 0; *start = max3(left, vma->vm_start, faddr & PMD_MASK); *end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE); return win; } /** * swap_vma_readahead - swap in pages in hope we need them soon * @targ_entry: swap entry of the targeted memory * @gfp_mask: memory allocation flags * @mpol: NUMA memory allocation policy to be applied * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE * @vmf: fault information * * Returns the struct folio for entry and addr, after queueing swapin. * * Primitive swap readahead code. We simply read in a few pages whose * virtual addresses are around the fault address in the same vma. * * Caller must hold read mmap_lock if vmf->vma is not NULL. 
* */ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) { struct blk_plug plug; struct swap_iocb *splug = NULL; struct folio *folio; pte_t *pte = NULL, pentry; int win; unsigned long start, end, addr; swp_entry_t entry; pgoff_t ilx; bool page_allocated; win = swap_vma_ra_win(vmf, &start, &end); if (win == 1) goto skip; ilx = targ_ilx - PFN_DOWN(vmf->address - start); blk_start_plug(&plug); for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) { if (!pte++) { pte = pte_offset_map(vmf->pmd, addr); if (!pte) break; } pentry = ptep_get_lockless(pte); if (!is_swap_pte(pentry)) continue; entry = pte_to_swp_entry(pentry); if (unlikely(non_swap_entry(entry))) continue; pte_unmap(pte); pte = NULL; folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, &page_allocated, false); if (!folio) continue; if (page_allocated) { swap_read_folio(folio, &splug); if (addr != vmf->address) { folio_set_readahead(folio); count_vm_event(SWAP_RA); } } folio_put(folio); } if (pte) pte_unmap(pte); blk_finish_plug(&plug); swap_read_unplug(splug); lru_add_drain(); skip: /* The folio was likely read above, so no need for plugging here */ folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx, &page_allocated, false); if (unlikely(page_allocated)) swap_read_folio(folio, NULL); return folio; } /** * swapin_readahead - swap in pages in hope we need them soon * @entry: swap entry of this memory * @gfp_mask: memory allocation flags * @vmf: fault information * * Returns the struct folio for entry and addr, after queueing swapin. * * It's a main entry function for swap readahead. By the configuration, * it will read ahead blocks by cluster-based(ie, physical disk based) * or vma-based(ie, virtual address based on faulty address) readahead. */ struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) { struct mempolicy *mpol; pgoff_t ilx; struct folio *folio; mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx); folio = swap_use_vma_readahead() ? swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) : swap_cluster_readahead(entry, gfp_mask, mpol, ilx); mpol_cond_put(mpol); return folio; } #ifdef CONFIG_SYSFS static ssize_t vma_ra_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead)); } static ssize_t vma_ra_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { ssize_t ret; ret = kstrtobool(buf, &enable_vma_readahead); if (ret) return ret; return count; } static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled); static struct attribute *swap_attrs[] = { &vma_ra_enabled_attr.attr, NULL, }; static const struct attribute_group swap_attr_group = { .attrs = swap_attrs, }; static int __init swap_init_sysfs(void) { int err; struct kobject *swap_kobj; swap_kobj = kobject_create_and_add("swap", mm_kobj); if (!swap_kobj) { pr_err("failed to create swap kobject\n"); return -ENOMEM; } err = sysfs_create_group(swap_kobj, &swap_attr_group); if (err) { pr_err("failed to register swap group\n"); goto delete_obj; } return 0; delete_obj: kobject_put(swap_kobj); return err; } subsys_initcall(swap_init_sysfs); #endif |
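The vma_ra_enabled attribute registered by swap_init_sysfs() above appears as /sys/kernel/mm/swap/vma_ra_enabled (mm_kobj corresponds to /sys/kernel/mm). A minimal userspace sketch for switching back to cluster-based readahead, assuming root privileges and the usual sysfs mount point:

/* Illustrative userspace helper, not kernel code. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/mm/swap/vma_ra_enabled", "w");

	if (!f) {
		perror("vma_ra_enabled");
		return 1;
	}
	/* kstrtobool() in the store handler accepts 0/1, y/n, true/false. */
	fputs("false\n", f);
	fclose(f);
	return 0;
}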
// SPDX-License-Identifier: GPL-2.0

/* Generic part */

typedef struct {
	block_t	*p;
	block_t	key;
	struct buffer_head *bh;
} Indirect;

static DEFINE_RWLOCK(pointers_lock);

static inline void add_chain(Indirect *p, struct buffer_head *bh, block_t *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}

static inline block_t *block_end(struct buffer_head *bh)
{
	return (block_t *)((char*)bh->b_data + bh->b_size);
}

static inline Indirect *get_branch(struct inode *inode,
					int depth,
					int *offsets,
					Indirect chain[DEPTH],
					int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, i_data(inode) + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, block_to_cpu(p->key));
		if (!bh)
			goto failure;
		read_lock(&pointers_lock);
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (block_t *)bh->b_data + *++offsets);
		read_unlock(&pointers_lock);
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	read_unlock(&pointers_lock);
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}

static int alloc_branch(struct inode *inode,
			     int num,
			     int *offsets,
			     Indirect *branch)
{
	int n = 0;
	int i;
	int parent = minix_new_block(inode);
	int err = -ENOSPC;

	branch[0].key = cpu_to_block(parent);
	if (parent)
		for (n = 1; n < num; n++) {
			struct buffer_head *bh;
			/* Allocate the next block */
			int nr = minix_new_block(inode);
			if (!nr)
				break;
			branch[n].key = cpu_to_block(nr);
			bh = sb_getblk(inode->i_sb, parent);
			if (!bh) {
				minix_free_block(inode, nr);
				err = -ENOMEM;
				break;
			}
			lock_buffer(bh);
			memset(bh->b_data, 0, bh->b_size);
			branch[n].bh = bh;
			branch[n].p = (block_t*) bh->b_data + offsets[n];
			*branch[n].p = branch[n].key;
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
			parent =
nr; } if (n == num) return 0; /* Allocation failed, free what we already allocated */ for (i = 1; i < n; i++) bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); return err; } static inline int splice_branch(struct inode *inode, Indirect chain[DEPTH], Indirect *where, int num) { int i; write_lock(&pointers_lock); /* Verify that place we are splicing to is still there and vacant */ if (!verify_chain(chain, where-1) || *where->p) goto changed; *where->p = where->key; write_unlock(&pointers_lock); /* We are done with atomic stuff, now do the rest of housekeeping */ inode_set_ctime_current(inode); /* had we spliced it onto indirect block? */ if (where->bh) mark_buffer_dirty_inode(where->bh, inode); mark_inode_dirty(inode); return 0; changed: write_unlock(&pointers_lock); for (i = 1; i < num; i++) bforget(where[i].bh); for (i = 0; i < num; i++) minix_free_block(inode, block_to_cpu(where[i].key)); return -EAGAIN; } static int get_block(struct inode * inode, sector_t block, struct buffer_head *bh, int create) { int err = -EIO; int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; int left; int depth = block_to_path(inode, block, offsets); if (depth == 0) goto out; reread: partial = get_branch(inode, depth, offsets, chain, &err); /* Simplest case - block found, no allocation needed */ if (!partial) { got_it: map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key)); /* Clean up and exit */ partial = chain+depth-1; /* the whole chain */ goto cleanup; } /* Next simple case - plain lookup or failed read of indirect block */ if (!create || err == -EIO) { cleanup: while (partial > chain) { brelse(partial->bh); partial--; } out: return err; } /* * Indirect block might be removed by truncate while we were * reading it. Handling of that case (forget what we've got and * reread) is taken out of the main path. 
*/ if (err == -EAGAIN) goto changed; left = (chain + depth) - partial; err = alloc_branch(inode, left, offsets+(partial-chain), partial); if (err) goto cleanup; if (splice_branch(inode, chain, partial, left) < 0) goto changed; set_buffer_new(bh); goto got_it; changed: while (partial > chain) { brelse(partial->bh); partial--; } goto reread; } static inline int all_zeroes(block_t *p, block_t *q) { while (p < q) if (*p++) return 0; return 1; } static Indirect *find_shared(struct inode *inode, int depth, int offsets[DEPTH], Indirect chain[DEPTH], block_t *top) { Indirect *partial, *p; int k, err; *top = 0; for (k = depth; k > 1 && !offsets[k-1]; k--) ; partial = get_branch(inode, k, offsets, chain, &err); write_lock(&pointers_lock); if (!partial) partial = chain + k-1; if (!partial->key && *partial->p) { write_unlock(&pointers_lock); goto no_top; } for (p=partial;p>chain && all_zeroes((block_t*)p->bh->b_data,p->p);p--) ; if (p == chain + k - 1 && p > chain) { p->p--; } else { *top = *p->p; *p->p = 0; } write_unlock(&pointers_lock); while(partial > p) { brelse(partial->bh); partial--; } no_top: return partial; } static inline void free_data(struct inode *inode, block_t *p, block_t *q) { unsigned long nr; for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (nr) { *p = 0; minix_free_block(inode, nr); } } } static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth) { struct buffer_head * bh; unsigned long nr; if (depth--) { for ( ; p < q ; p++) { nr = block_to_cpu(*p); if (!nr) continue; *p = 0; bh = sb_bread(inode->i_sb, nr); if (!bh) continue; free_branches(inode, (block_t*)bh->b_data, block_end(bh), depth); bforget(bh); minix_free_block(inode, nr); mark_inode_dirty(inode); } } else free_data(inode, p, q); } static inline void truncate (struct inode * inode) { struct super_block *sb = inode->i_sb; block_t *idata = i_data(inode); int offsets[DEPTH]; Indirect chain[DEPTH]; Indirect *partial; block_t nr = 0; int n; int first_whole; long iblock; iblock = (inode->i_size + sb->s_blocksize -1) >> sb->s_blocksize_bits; block_truncate_page(inode->i_mapping, inode->i_size, get_block); n = block_to_path(inode, iblock, offsets); if (!n) return; if (n == 1) { free_data(inode, idata+offsets[0], idata + DIRECT); first_whole = 0; goto do_indirects; } first_whole = offsets[0] + 1 - DIRECT; partial = find_shared(inode, n, offsets, chain, &nr); if (nr) { if (partial == chain) mark_inode_dirty(inode); else mark_buffer_dirty_inode(partial->bh, inode); free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); } /* Clear the ends of indirect blocks on the shared branch */ while (partial > chain) { free_branches(inode, partial->p + 1, block_end(partial->bh), (chain+n-1) - partial); mark_buffer_dirty_inode(partial->bh, inode); brelse (partial->bh); partial--; } do_indirects: /* Kill the remaining (whole) subtrees */ while (first_whole < DEPTH-1) { nr = idata[DIRECT+first_whole]; if (nr) { idata[DIRECT+first_whole] = 0; mark_inode_dirty(inode); free_branches(inode, &nr, &nr+1, first_whole+1); } first_whole++; } inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); } static inline unsigned nblocks(loff_t size, struct super_block *sb) { int k = sb->s_blocksize_bits - 10; unsigned blocks, res, direct = DIRECT, i = DEPTH; blocks = (size + sb->s_blocksize - 1) >> (BLOCK_SIZE_BITS + k); res = blocks; while (--i && blocks > direct) { blocks -= direct; blocks += sb->s_blocksize/sizeof(block_t) - 1; blocks /= sb->s_blocksize/sizeof(block_t); res += blocks; direct = 1; } return 
res;
}
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Copyright (C) 2020 Google LLC.
 */

#ifndef _LINUX_BPF_LSM_H
#define _LINUX_BPF_LSM_H

#include <linux/sched.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>

#ifdef CONFIG_BPF_LSM

#define LSM_HOOK(RET, DEFAULT, NAME, ...) \
	RET bpf_lsm_##NAME(__VA_ARGS__);
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK

struct bpf_storage_blob {
	struct bpf_local_storage __rcu *storage;
};

extern struct lsm_blob_sizes bpf_lsm_blob_sizes;

int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
			const struct bpf_prog *prog);

bool bpf_lsm_is_sleepable_hook(u32 btf_id);
bool bpf_lsm_is_trusted(const struct bpf_prog *prog);

static inline struct bpf_storage_blob *bpf_inode(
		const struct inode *inode)
{
	if (unlikely(!inode->i_security))
		return NULL;

	return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
}

extern const struct bpf_func_proto bpf_inode_storage_get_proto;
extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);

void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);

int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
			     struct bpf_retval_range *range);
int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
				const struct bpf_dynptr *value_p, int flags);
int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str);
bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog);

#else /* !CONFIG_BPF_LSM */

static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
{
	return false;
}

static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
{
	return false;
}

static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
				      const struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline struct bpf_storage_blob *bpf_inode(
		const struct inode *inode)
{
	return NULL;
}

static inline void bpf_inode_storage_free(struct inode *inode)
{
}

static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
					    bpf_func_t *bpf_func)
{
}

static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
					   struct bpf_retval_range *range)
{
	return -EOPNOTSUPP;
}

static inline int bpf_set_dentry_xattr_locked(struct dentry *dentry,
					      const char *name__str,
					      const struct bpf_dynptr *value_p,
					      int flags)
{
	return -EOPNOTSUPP;
}

static inline int bpf_remove_dentry_xattr_locked(struct dentry *dentry,
						 const char *name__str)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
{
	return false;
}

#endif /* CONFIG_BPF_LSM */

#endif /* _LINUX_BPF_LSM_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
Copyright (c) Nokia Corporation, 2006, 2007 * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * This file includes volume table manipulation code. The volume table is an * on-flash table containing volume meta-data like name, number of reserved * physical eraseblocks, type, etc. The volume table is stored in the so-called * "layout volume". * * The layout volume is an internal volume which is organized as follows. It * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each * other. This redundancy guarantees robustness to unclean reboots. The volume * table is basically an array of volume table records. Each record contains * full information about the volume and protected by a CRC checksum. Note, * nowadays we use the atomic LEB change operation when updating the volume * table, so we do not really need 2 LEBs anymore, but we preserve the older * design for the backward compatibility reasons. * * When the volume table is changed, it is first changed in RAM. Then LEB 0 is * erased, and the updated volume table is written back to LEB 0. Then same for * LEB 1. This scheme guarantees recoverability from unclean reboots. * * In this UBI implementation the on-flash volume table does not contain any * information about how much data static volumes contain. * * But it would still be beneficial to store this information in the volume * table. For example, suppose we have a static volume X, and all its physical * eraseblocks became bad for some reasons. Suppose we are attaching the * corresponding MTD device, for some reason we find no logical eraseblocks * corresponding to the volume X. According to the volume table volume X does * exist. So we don't know whether it is just empty or all its physical * eraseblocks went bad. So we cannot alarm the user properly. * * The volume table also stores so-called "update marker", which is used for * volume updates. Before updating the volume, the update marker is set, and * after the update operation is finished, the update marker is cleared. So if * the update operation was interrupted (e.g. by an unclean reboot) - the * update marker is still there and we know that the volume's contents is * damaged. */ #include <linux/crc32.h> #include <linux/err.h> #include <linux/slab.h> #include <asm/div64.h> #include "ubi.h" static void self_vtbl_check(const struct ubi_device *ubi); /* Empty volume table record */ static struct ubi_vtbl_record empty_vtbl_record; /** * ubi_update_layout_vol - helper for updatting layout volumes on flash * @ubi: UBI device description object */ static int ubi_update_layout_vol(struct ubi_device *ubi) { struct ubi_volume *layout_vol; int i, err; layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl, ubi->vtbl_size); if (err) return err; } return 0; } /** * ubi_change_vtbl_record - change volume table record. * @ubi: UBI device description object * @idx: table index to change * @vtbl_rec: new volume table record * * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty * volume table record is written. The caller does not have to calculate CRC of * the record as it is done by this function. Returns zero in case of success * and a negative error code in case of failure. 
*/ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, struct ubi_vtbl_record *vtbl_rec) { int err; uint32_t crc; ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); if (!vtbl_rec) vtbl_rec = &empty_vtbl_record; else { crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); vtbl_rec->crc = cpu_to_be32(crc); } memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); err = ubi_update_layout_vol(ubi); self_vtbl_check(ubi); return err ? err : 0; } /** * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table. * @ubi: UBI device description object * @rename_list: list of &struct ubi_rename_entry objects * * This function re-names multiple volumes specified in @req in the volume * table. Returns zero in case of success and a negative error code in case of * failure. */ int ubi_vtbl_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list) { struct ubi_rename_entry *re; list_for_each_entry(re, rename_list, list) { uint32_t crc; struct ubi_volume *vol = re->desc->vol; struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id]; if (re->remove) { memcpy(vtbl_rec, &empty_vtbl_record, sizeof(struct ubi_vtbl_record)); continue; } vtbl_rec->name_len = cpu_to_be16(re->new_name_len); memcpy(vtbl_rec->name, re->new_name, re->new_name_len); memset(vtbl_rec->name + re->new_name_len, 0, UBI_VOL_NAME_MAX + 1 - re->new_name_len); crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); vtbl_rec->crc = cpu_to_be32(crc); } return ubi_update_layout_vol(ubi); } /** * vtbl_check - check if volume table is not corrupted and sensible. * @ubi: UBI device description object * @vtbl: volume table * * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, * and %-EINVAL if it contains inconsistent data. */ static int vtbl_check(const struct ubi_device *ubi, const struct ubi_vtbl_record *vtbl) { int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; int upd_marker, err; uint32_t crc; const char *name; for (i = 0; i < ubi->vtbl_slots; i++) { cond_resched(); reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); alignment = be32_to_cpu(vtbl[i].alignment); data_pad = be32_to_cpu(vtbl[i].data_pad); upd_marker = vtbl[i].upd_marker; vol_type = vtbl[i].vol_type; name_len = be16_to_cpu(vtbl[i].name_len); name = &vtbl[i].name[0]; crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); if (be32_to_cpu(vtbl[i].crc) != crc) { ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x", i, crc, be32_to_cpu(vtbl[i].crc)); ubi_dump_vtbl_record(&vtbl[i], i); return 1; } if (reserved_pebs == 0) { if (memcmp(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE)) { err = 2; goto bad; } continue; } if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || name_len < 0) { err = 3; goto bad; } if (alignment > ubi->leb_size || alignment == 0) { err = 4; goto bad; } n = alignment & (ubi->min_io_size - 1); if (alignment != 1 && n) { err = 5; goto bad; } n = ubi->leb_size % alignment; if (data_pad != n) { ubi_err(ubi, "bad data_pad, has to be %d", n); err = 6; goto bad; } if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { err = 7; goto bad; } if (upd_marker != 0 && upd_marker != 1) { err = 8; goto bad; } if (reserved_pebs > ubi->good_peb_count) { ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d", reserved_pebs, ubi->good_peb_count); err = 9; goto bad; } if (name_len > UBI_VOL_NAME_MAX) { err = 10; goto bad; } if (name[0] == '\0') { err = 11; goto bad; } if (name_len != strnlen(name, name_len + 1)) { err = 12; goto bad; } } /* Checks that all names are 
unique */ for (i = 0; i < ubi->vtbl_slots - 1; i++) { for (n = i + 1; n < ubi->vtbl_slots; n++) { int len1 = be16_to_cpu(vtbl[i].name_len); int len2 = be16_to_cpu(vtbl[n].name_len); if (len1 > 0 && len1 == len2 && !strncmp(vtbl[i].name, vtbl[n].name, len1)) { ubi_err(ubi, "volumes %d and %d have the same name \"%s\"", i, n, vtbl[i].name); ubi_dump_vtbl_record(&vtbl[i], i); ubi_dump_vtbl_record(&vtbl[n], n); return -EINVAL; } } } return 0; bad: ubi_err(ubi, "volume table check failed: record %d, error %d", i, err); ubi_dump_vtbl_record(&vtbl[i], i); return -EINVAL; } /** * create_vtbl - create a copy of volume table. * @ubi: UBI device description object * @ai: attaching information * @copy: number of the volume table copy * @vtbl: contents of the volume table * * This function returns zero in case of success and a negative error code in * case of failure. */ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, int copy, void *vtbl) { int err, tries = 0; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; struct ubi_ainf_peb *new_aeb; dbg_gen("create volume table (copy #%d)", copy + 1); vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!vidb) return -ENOMEM; vid_hdr = ubi_get_vid_hdr(vidb); retry: new_aeb = ubi_early_get_peb(ubi, ai); if (IS_ERR(new_aeb)) { err = PTR_ERR(new_aeb); goto out_free; } vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE; vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; vid_hdr->data_size = vid_hdr->used_ebs = vid_hdr->data_pad = cpu_to_be32(0); vid_hdr->lnum = cpu_to_be32(copy); vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum); /* The EC header is already there, write the VID header */ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb); if (err) goto write_error; /* Write the layout volume contents */ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size); if (err) goto write_error; /* * And add it to the attaching information. Don't delete the old version * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. */ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); ubi_free_aeb(ai, new_aeb); ubi_free_vid_buf(vidb); return err; write_error: if (err == -EIO && ++tries <= 5) { /* * Probably this physical eraseblock went bad, try to pick * another one. */ list_add(&new_aeb->u.list, &ai->erase); goto retry; } ubi_free_aeb(ai, new_aeb); out_free: ubi_free_vid_buf(vidb); return err; } /** * process_lvol - process the layout volume. * @ubi: UBI device description object * @ai: attaching information * @av: layout volume attaching information * * This function is responsible for reading the layout volume, ensuring it is * not corrupted, and recovering from corruptions if needed. Returns volume * table in case of success and a negative error code in case of failure. */ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_ainf_volume *av) { int err; struct rb_node *rb; struct ubi_ainf_peb *aeb; struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL }; int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1}; /* * UBI goes through the following steps when it changes the layout * volume: * a. erase LEB 0; * b. write new data to LEB 0; * c. erase LEB 1; * d. write new data to LEB 1. * * Before the change, both LEBs contain the same data. * * Due to unclean reboots, the contents of LEB 0 may be lost, but there * should LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is not. 
* Similarly, LEB 1 may be lost, but there should be LEB 0. And * finally, unclean reboots may result in a situation when neither LEB * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB * 0 contains more recent information. * * So the plan is to first check LEB 0. Then * a. if LEB 0 is OK, it must be containing the most recent data; then * we compare it with LEB 1, and if they are different, we copy LEB * 0 to LEB 1; * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1 * to LEB 0. */ dbg_gen("check layout volume"); /* Read both LEB 0 and LEB 1 into memory */ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) { leb[aeb->lnum] = vzalloc(ubi->vtbl_size); if (!leb[aeb->lnum]) { err = -ENOMEM; goto out_free; } err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0, ubi->vtbl_size); if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) /* * Scrub the PEB later. Note, -EBADMSG indicates an * uncorrectable ECC error, but we have our own CRC and * the data will be checked later. If the data is OK, * the PEB will be scrubbed (because we set * aeb->scrub). If the data is not OK, the contents of * the PEB will be recovered from the second copy, and * aeb->scrub will be cleared in * 'ubi_add_to_av()'. */ aeb->scrub = 1; else if (err) goto out_free; } err = -EINVAL; if (leb[0]) { leb_corrupted[0] = vtbl_check(ubi, leb[0]); if (leb_corrupted[0] < 0) goto out_free; } if (!leb_corrupted[0]) { /* LEB 0 is OK */ if (leb[1]) leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); if (leb_corrupted[1]) { ubi_warn(ubi, "volume table copy #2 is corrupted"); err = create_vtbl(ubi, ai, 1, leb[0]); if (err) goto out_free; ubi_msg(ubi, "volume table was restored"); } /* Both LEB 1 and LEB 2 are OK and consistent */ vfree(leb[1]); return leb[0]; } else { /* LEB 0 is corrupted or does not exist */ if (leb[1]) { leb_corrupted[1] = vtbl_check(ubi, leb[1]); if (leb_corrupted[1] < 0) goto out_free; } if (leb_corrupted[1]) { /* Both LEB 0 and LEB 1 are corrupted */ ubi_err(ubi, "both volume tables are corrupted"); goto out_free; } ubi_warn(ubi, "volume table copy #1 is corrupted"); err = create_vtbl(ubi, ai, 0, leb[1]); if (err) goto out_free; ubi_msg(ubi, "volume table was restored"); vfree(leb[0]); return leb[1]; } out_free: vfree(leb[0]); vfree(leb[1]); return ERR_PTR(err); } /** * create_empty_lvol - create empty layout volume. * @ubi: UBI device description object * @ai: attaching information * * This function returns volume table contents in case of success and a * negative error code in case of failure. */ static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi, struct ubi_attach_info *ai) { int i; struct ubi_vtbl_record *vtbl; vtbl = vzalloc(ubi->vtbl_size); if (!vtbl) return ERR_PTR(-ENOMEM); for (i = 0; i < ubi->vtbl_slots; i++) memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE); for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { int err; err = create_vtbl(ubi, ai, i, vtbl); if (err) { vfree(vtbl); return ERR_PTR(err); } } return vtbl; } /** * init_volumes - initialize volume information for existing volumes. * @ubi: UBI device description object * @ai: scanning information * @vtbl: volume table * * This function allocates volume description objects for existing volumes. * Returns zero in case of success and a negative error code in case of * failure. 
*/ static int init_volumes(struct ubi_device *ubi, const struct ubi_attach_info *ai, const struct ubi_vtbl_record *vtbl) { int i, err, reserved_pebs = 0; struct ubi_ainf_volume *av; struct ubi_volume *vol; for (i = 0; i < ubi->vtbl_slots; i++) { cond_resched(); if (be32_to_cpu(vtbl[i].reserved_pebs) == 0) continue; /* Empty record */ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); if (!vol) return -ENOMEM; vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); vol->alignment = be32_to_cpu(vtbl[i].alignment); vol->data_pad = be32_to_cpu(vtbl[i].data_pad); vol->upd_marker = vtbl[i].upd_marker; vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; vol->name_len = be16_to_cpu(vtbl[i].name_len); vol->usable_leb_size = ubi->leb_size - vol->data_pad; memcpy(vol->name, vtbl[i].name, vol->name_len); vol->name[vol->name_len] = '\0'; vol->vol_id = i; if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG) vol->skip_check = 1; if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { /* Auto re-size flag may be set only for one volume */ if (ubi->autoresize_vol_id != -1) { ubi_err(ubi, "more than one auto-resize volume (%d and %d)", ubi->autoresize_vol_id, i); kfree(vol); return -EINVAL; } ubi->autoresize_vol_id = i; } ubi_assert(!ubi->volumes[i]); ubi->volumes[i] = vol; ubi->vol_count += 1; vol->ubi = ubi; reserved_pebs += vol->reserved_pebs; /* * We use ubi->peb_count and not vol->reserved_pebs because * we want to keep the code simple. Otherwise we'd have to * resize/check the bitmap upon volume resize too. * Allocating a few bytes more does not hurt. */ err = ubi_fastmap_init_checkmap(vol, ubi->peb_count); if (err) return err; /* * In case of dynamic volume UBI knows nothing about how many * data is stored there. So assume the whole volume is used. */ if (vol->vol_type == UBI_DYNAMIC_VOLUME) { vol->used_ebs = vol->reserved_pebs; vol->last_eb_bytes = vol->usable_leb_size; vol->used_bytes = (long long)vol->used_ebs * vol->usable_leb_size; continue; } /* Static volumes only */ av = ubi_find_av(ai, i); if (!av || !av->leb_count) { /* * No eraseblocks belonging to this volume found. We * don't actually know whether this static volume is * completely corrupted or just contains no data. And * we cannot know this as long as data size is not * stored on flash. So we just assume the volume is * empty. FIXME: this should be handled. */ continue; } if (av->leb_count != av->used_ebs) { /* * We found a static volume which misses several * eraseblocks. Treat it as corrupted. 
*/ ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted", av->vol_id, av->used_ebs - av->leb_count); vol->corrupted = 1; continue; } vol->used_ebs = av->used_ebs; vol->used_bytes = (long long)(vol->used_ebs - 1) * vol->usable_leb_size; vol->used_bytes += av->last_data_size; vol->last_eb_bytes = av->last_data_size; } /* And add the layout volume */ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); if (!vol) return -ENOMEM; vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS; vol->alignment = UBI_LAYOUT_VOLUME_ALIGN; vol->vol_type = UBI_DYNAMIC_VOLUME; vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1; memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1); vol->usable_leb_size = ubi->leb_size; vol->used_ebs = vol->reserved_pebs; vol->last_eb_bytes = vol->reserved_pebs; vol->used_bytes = (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad); vol->vol_id = UBI_LAYOUT_VOLUME_ID; vol->ref_count = 1; ubi_assert(!ubi->volumes[i]); ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol; reserved_pebs += vol->reserved_pebs; ubi->vol_count += 1; vol->ubi = ubi; err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS); if (err) return err; if (reserved_pebs > ubi->avail_pebs) { ubi_err(ubi, "not enough PEBs, required %d, available %d", reserved_pebs, ubi->avail_pebs); if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); return -ENOSPC; } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; return 0; } /** * check_av - check volume attaching information. * @vol: UBI volume description object * @av: volume attaching information * * This function returns zero if the volume attaching information is consistent * to the data read from the volume tabla, and %-EINVAL if not. */ static int check_av(const struct ubi_volume *vol, const struct ubi_ainf_volume *av) { int err; if (av->highest_lnum >= vol->reserved_pebs) { err = 1; goto bad; } if (av->leb_count > vol->reserved_pebs) { err = 2; goto bad; } if (av->vol_type != vol->vol_type) { err = 3; goto bad; } if (av->used_ebs > vol->reserved_pebs) { err = 4; goto bad; } if (av->data_pad != vol->data_pad) { err = 5; goto bad; } return 0; bad: ubi_err(vol->ubi, "bad attaching information, error %d", err); ubi_dump_av(av); ubi_dump_vol_info(vol); return -EINVAL; } /** * check_attaching_info - check that attaching information. * @ubi: UBI device description object * @ai: attaching information * * Even though we protect on-flash data by CRC checksums, we still don't trust * the media. This function ensures that attaching information is consistent to * the information read from the volume table. Returns zero if the attaching * information is OK and %-EINVAL if it is not. 
*/ static int check_attaching_info(const struct ubi_device *ubi, struct ubi_attach_info *ai) { int err, i; struct ubi_ainf_volume *av; struct ubi_volume *vol; if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d", ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); return -EINVAL; } if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && ai->highest_vol_id < UBI_INTERNAL_VOL_START) { ubi_err(ubi, "too large volume ID %d found", ai->highest_vol_id); return -EINVAL; } for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { cond_resched(); av = ubi_find_av(ai, i); vol = ubi->volumes[i]; if (!vol) { if (av) ubi_remove_av(ai, av); continue; } if (vol->reserved_pebs == 0) { ubi_assert(i < ubi->vtbl_slots); if (!av) continue; /* * During attaching we found a volume which does not * exist according to the information in the volume * table. This must have happened due to an unclean * reboot while the volume was being removed. Discard * these eraseblocks. */ ubi_msg(ubi, "finish volume %d removal", av->vol_id); ubi_remove_av(ai, av); } else if (av) { err = check_av(vol, av); if (err) return err; } } return 0; } /** * ubi_read_volume_table - read the volume table. * @ubi: UBI device description object * @ai: attaching information * * This function reads volume table, checks it, recover from errors if needed, * or creates it if needed. Returns zero in case of success and a negative * error code in case of failure. */ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai) { int err; struct ubi_ainf_volume *av; empty_vtbl_record.crc = cpu_to_be32(0xf116c36b); /* * The number of supported volumes is limited by the eraseblock size * and by the UBI_MAX_VOLUMES constant. */ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) { ubi_err(ubi, "LEB size too small for a volume record"); return -EINVAL; } ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE; if (ubi->vtbl_slots > UBI_MAX_VOLUMES) ubi->vtbl_slots = UBI_MAX_VOLUMES; ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE; ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size); av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID); if (!av) { /* * No logical eraseblocks belonging to the layout volume were * found. This could mean that the flash is just empty. In * this case we create empty layout volume. * * But if flash is not empty this must be a corruption or the * MTD device just contains garbage. */ if (ai->is_empty) { ubi->vtbl = create_empty_lvol(ubi, ai); if (IS_ERR(ubi->vtbl)) return PTR_ERR(ubi->vtbl); } else { ubi_err(ubi, "the layout volume was not found"); return -EINVAL; } } else { if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { /* This must not happen with proper UBI images */ ubi_err(ubi, "too many LEBs (%d) in layout volume", av->leb_count); return -EINVAL; } ubi->vtbl = process_lvol(ubi, ai, av); if (IS_ERR(ubi->vtbl)) return PTR_ERR(ubi->vtbl); } ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count; /* * The layout volume is OK, initialize the corresponding in-RAM data * structures. */ err = init_volumes(ubi, ai, ubi->vtbl); if (err) goto out_free; /* * Make sure that the attaching information is consistent to the * information stored in the volume table. */ err = check_attaching_info(ubi, ai); if (err) goto out_free; return 0; out_free: vfree(ubi->vtbl); ubi_free_all_volumes(ubi); return err; } /** * self_vtbl_check - check volume table. 
 * @ubi: UBI device description object
 */
static void self_vtbl_check(const struct ubi_device *ubi)
{
	if (!ubi_dbg_chk_gen(ubi))
		return;

	if (vtbl_check(ubi, ubi->vtbl)) {
		ubi_err(ubi, "self-check failed");
		BUG();
	}
}
// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/fsverity.h>
#include "extent_io.h"
#include "extent-io-tree.h"
#include "extent_map.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "locking.h"
#include "backref.h"
#include "disk-io.h"
#include "subpage.h"
#include "zoned.h"
#include "block-group.h"
#include "compression.h"
#include "fs.h"
#include "accessors.h"
#include "file-item.h"
#include "file.h"
#include "dev-replace.h"
#include "super.h"
#include "transaction.h"

static struct kmem_cache *extent_buffer_cache;

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_add(&eb->leak_list, &fs_info->allocated_ebs);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_del(&eb->leak_list);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}

void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
{
	struct extent_buffer *eb;
	unsigned long flags;

	/*
	 * If we didn't get into open_ctree our allocated_ebs will not be
	 * initialized, so just skip this.
*/ if (!fs_info->allocated_ebs.next) return; WARN_ON(!list_empty(&fs_info->allocated_ebs)); spin_lock_irqsave(&fs_info->eb_leak_lock, flags); while (!list_empty(&fs_info->allocated_ebs)) { eb = list_first_entry(&fs_info->allocated_ebs, struct extent_buffer, leak_list); btrfs_err(fs_info, "buffer leak start %llu len %u refs %d bflags %lu owner %llu", eb->start, eb->len, refcount_read(&eb->refs), eb->bflags, btrfs_header_owner(eb)); list_del(&eb->leak_list); WARN_ON_ONCE(1); kmem_cache_free(extent_buffer_cache, eb); } spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags); } #else #define btrfs_leak_debug_add_eb(eb) do {} while (0) #define btrfs_leak_debug_del_eb(eb) do {} while (0) #endif /* * Structure to record info about the bio being assembled, and other info like * how many bytes are there before stripe/ordered extent boundary. */ struct btrfs_bio_ctrl { struct btrfs_bio *bbio; /* Last byte contained in bbio + 1 . */ loff_t next_file_offset; enum btrfs_compression_type compress_type; u32 len_to_oe_boundary; blk_opf_t opf; btrfs_bio_end_io_t end_io_func; struct writeback_control *wbc; /* * The sectors of the page which are going to be submitted by * extent_writepage_io(). * This is to avoid touching ranges covered by compression/inline. */ unsigned long submit_bitmap; struct readahead_control *ractl; }; static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) { struct btrfs_bio *bbio = bio_ctrl->bbio; if (!bbio) return; /* Caller should ensure the bio has at least some range added */ ASSERT(bbio->bio.bi_iter.bi_size); if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ && bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) btrfs_submit_compressed_read(bbio); else btrfs_submit_bbio(bbio, 0); /* The bbio is owned by the end_io handler now */ bio_ctrl->bbio = NULL; } /* * Submit or fail the current bio in the bio_ctrl structure. */ static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret) { struct btrfs_bio *bbio = bio_ctrl->bbio; if (!bbio) return; if (ret) { ASSERT(ret < 0); btrfs_bio_end_io(bbio, errno_to_blk_status(ret)); /* The bio is owned by the end_io handler now */ bio_ctrl->bbio = NULL; } else { submit_one_bio(bio_ctrl); } } int __init extent_buffer_init_cachep(void) { extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer", sizeof(struct extent_buffer), 0, 0, NULL); if (!extent_buffer_cache) return -ENOMEM; return 0; } void __cold extent_buffer_free_cachep(void) { /* * Make sure all delayed rcu free are flushed before we * destroy caches. 
*/ rcu_barrier(); kmem_cache_destroy(extent_buffer_cache); } static void process_one_folio(struct btrfs_fs_info *fs_info, struct folio *folio, const struct folio *locked_folio, unsigned long page_ops, u64 start, u64 end) { u32 len; ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); len = end + 1 - start; if (page_ops & PAGE_SET_ORDERED) btrfs_folio_clamp_set_ordered(fs_info, folio, start, len); if (page_ops & PAGE_START_WRITEBACK) { btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len); btrfs_folio_clamp_set_writeback(fs_info, folio, start, len); } if (page_ops & PAGE_END_WRITEBACK) btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len); if (folio != locked_folio && (page_ops & PAGE_UNLOCK)) btrfs_folio_end_lock(fs_info, folio, start, len); } static void __process_folios_contig(struct address_space *mapping, const struct folio *locked_folio, u64 start, u64 end, unsigned long page_ops) { struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); pgoff_t index = start >> PAGE_SHIFT; pgoff_t end_index = end >> PAGE_SHIFT; struct folio_batch fbatch; int i; folio_batch_init(&fbatch); while (index <= end_index) { int found_folios; found_folios = filemap_get_folios_contig(mapping, &index, end_index, &fbatch); for (i = 0; i < found_folios; i++) { struct folio *folio = fbatch.folios[i]; process_one_folio(fs_info, folio, locked_folio, page_ops, start, end); } folio_batch_release(&fbatch); cond_resched(); } } static noinline void unlock_delalloc_folio(const struct inode *inode, struct folio *locked_folio, u64 start, u64 end) { ASSERT(locked_folio); __process_folios_contig(inode->i_mapping, locked_folio, start, end, PAGE_UNLOCK); } static noinline int lock_delalloc_folios(struct inode *inode, struct folio *locked_folio, u64 start, u64 end) { struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct address_space *mapping = inode->i_mapping; pgoff_t index = start >> PAGE_SHIFT; pgoff_t end_index = end >> PAGE_SHIFT; u64 processed_end = start; struct folio_batch fbatch; folio_batch_init(&fbatch); while (index <= end_index) { unsigned int found_folios, i; found_folios = filemap_get_folios_contig(mapping, &index, end_index, &fbatch); if (found_folios == 0) goto out; for (i = 0; i < found_folios; i++) { struct folio *folio = fbatch.folios[i]; u64 range_start; u32 range_len; if (folio == locked_folio) continue; folio_lock(folio); if (!folio_test_dirty(folio) || folio->mapping != mapping) { folio_unlock(folio); goto out; } range_start = max_t(u64, folio_pos(folio), start); range_len = min_t(u64, folio_end(folio), end + 1) - range_start; btrfs_folio_set_lock(fs_info, folio, range_start, range_len); processed_end = range_start + range_len - 1; } folio_batch_release(&fbatch); cond_resched(); } return 0; out: folio_batch_release(&fbatch); if (processed_end > start) unlock_delalloc_folio(inode, locked_folio, start, processed_end); return -EAGAIN; } /* * Find and lock a contiguous range of bytes in the file marked as delalloc, no * more than @max_bytes. * * @start: The original start bytenr to search. * Will store the extent range start bytenr. * @end: The original end bytenr of the search range * Will store the extent range end bytenr. * * Return true if we find a delalloc range which starts inside the original * range, and @start/@end will store the delalloc range start/end. * * Return false if we can't find any delalloc range which starts inside the * original range, and @start/@end will be the non-delalloc range start/end. 
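 *
 * Illustrative usage sketch (added for clarity, not part of the original
 * comment), roughly mirroring how writepage_delalloc() below walks a
 * single folio; the variable names are only for illustration:
 *
 *	u64 cur = folio_pos(folio);
 *	u64 page_end = folio_pos(folio) + folio_size(folio) - 1;
 *
 *	while (cur < page_end) {
 *		u64 range_end = page_end;
 *
 *		if (!find_lock_delalloc_range(inode, folio, &cur, &range_end)) {
 *			// [cur, range_end] holds no delalloc, skip past it
 *			cur = range_end + 1;
 *			continue;
 *		}
 *		// [cur, range_end] is delalloc and its folios are now locked
 *		cur = range_end + 1;
 *	}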
*/ EXPORT_FOR_TESTS noinline_for_stack bool find_lock_delalloc_range(struct inode *inode, struct folio *locked_folio, u64 *start, u64 *end) { struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; const u64 orig_start = *start; const u64 orig_end = *end; /* The sanity tests may not set a valid fs_info. */ u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE; u64 delalloc_start; u64 delalloc_end; bool found; struct extent_state *cached_state = NULL; int ret; int loops = 0; /* Caller should pass a valid @end to indicate the search range end */ ASSERT(orig_end > orig_start); /* The range should at least cover part of the folio */ ASSERT(!(orig_start >= folio_end(locked_folio) || orig_end <= folio_pos(locked_folio))); again: /* step one, find a bunch of delalloc bytes starting at start */ delalloc_start = *start; delalloc_end = 0; found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end, max_bytes, &cached_state); if (!found || delalloc_end <= *start || delalloc_start > orig_end) { *start = delalloc_start; /* @delalloc_end can be -1, never go beyond @orig_end */ *end = min(delalloc_end, orig_end); btrfs_free_extent_state(cached_state); return false; } /* * start comes from the offset of locked_folio. We have to lock * folios in order, so we can't process delalloc bytes before * locked_folio */ if (delalloc_start < *start) delalloc_start = *start; /* * make sure to limit the number of folios we try to lock down */ if (delalloc_end + 1 - delalloc_start > max_bytes) delalloc_end = delalloc_start + max_bytes - 1; /* step two, lock all the folioss after the folios that has start */ ret = lock_delalloc_folios(inode, locked_folio, delalloc_start, delalloc_end); ASSERT(!ret || ret == -EAGAIN); if (ret == -EAGAIN) { /* some of the folios are gone, lets avoid looping by * shortening the size of the delalloc range we're searching */ btrfs_free_extent_state(cached_state); cached_state = NULL; if (!loops) { max_bytes = PAGE_SIZE; loops = 1; goto again; } else { found = false; goto out_failed; } } /* step three, lock the state bits for the whole range */ btrfs_lock_extent(tree, delalloc_start, delalloc_end, &cached_state); /* then test to make sure it is all still delalloc */ ret = btrfs_test_range_bit(tree, delalloc_start, delalloc_end, EXTENT_DELALLOC, cached_state); btrfs_unlock_extent(tree, delalloc_start, delalloc_end, &cached_state); if (!ret) { unlock_delalloc_folio(inode, locked_folio, delalloc_start, delalloc_end); cond_resched(); goto again; } *start = delalloc_start; *end = delalloc_end; out_failed: return found; } void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, const struct folio *locked_folio, struct extent_state **cached, u32 clear_bits, unsigned long page_ops) { btrfs_clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached); __process_folios_contig(inode->vfs_inode.i_mapping, locked_folio, start, end, page_ops); } static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); if (!fsverity_active(folio->mapping->host) || btrfs_folio_test_uptodate(fs_info, folio, start, len) || start >= i_size_read(folio->mapping->host)) return true; return fsverity_verify_folio(folio); } static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); ASSERT(folio_pos(folio) <= start && start + len <= folio_end(folio)); if (uptodate 
&& btrfs_verify_folio(folio, start, len)) btrfs_folio_set_uptodate(fs_info, folio, start, len); else btrfs_folio_clear_uptodate(fs_info, folio, start, len); if (!btrfs_is_subpage(fs_info, folio)) folio_unlock(folio); else btrfs_folio_end_lock(fs_info, folio, start, len); } /* * After a write IO is done, we need to: * * - clear the uptodate bits on error * - clear the writeback bits in the extent tree for the range * - filio_end_writeback() if there is no more pending io for the folio * * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. */ static void end_bbio_data_write(struct btrfs_bio *bbio) { struct btrfs_fs_info *fs_info = bbio->fs_info; struct bio *bio = &bbio->bio; int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; const u32 sectorsize = fs_info->sectorsize; ASSERT(!bio_flagged(bio, BIO_CLONED)); bio_for_each_folio_all(fi, bio) { struct folio *folio = fi.folio; u64 start = folio_pos(folio) + fi.offset; u32 len = fi.length; /* Our read/write should always be sector aligned. */ if (!IS_ALIGNED(fi.offset, sectorsize)) btrfs_err(fs_info, "partial page write in btrfs with offset %zu and length %zu", fi.offset, fi.length); else if (!IS_ALIGNED(fi.length, sectorsize)) btrfs_info(fs_info, "incomplete page write with offset %zu and length %zu", fi.offset, fi.length); btrfs_finish_ordered_extent(bbio->ordered, folio, start, len, !error); if (error) mapping_set_error(folio->mapping, error); btrfs_folio_clear_writeback(fs_info, folio, start, len); } bio_put(bio); } static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio) { ASSERT(folio_test_locked(folio)); if (!btrfs_is_subpage(fs_info, folio)) return; ASSERT(folio_test_private(folio)); btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), folio_size(folio)); } /* * After a data read IO is done, we need to: * * - clear the uptodate bits on error * - set the uptodate bits if things worked * - set the folio up to date if all extents in the tree are uptodate * - clear the lock bit in the extent tree * - unlock the folio if there are no other extents locked for it * * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. */ static void end_bbio_data_read(struct btrfs_bio *bbio) { struct btrfs_fs_info *fs_info = bbio->fs_info; struct bio *bio = &bbio->bio; struct folio_iter fi; ASSERT(!bio_flagged(bio, BIO_CLONED)); bio_for_each_folio_all(fi, &bbio->bio) { bool uptodate = !bio->bi_status; struct folio *folio = fi.folio; struct inode *inode = folio->mapping->host; u64 start = folio_pos(folio) + fi.offset; btrfs_debug(fs_info, "%s: bi_sector=%llu, err=%d, mirror=%u", __func__, bio->bi_iter.bi_sector, bio->bi_status, bbio->mirror_num); if (likely(uptodate)) { u64 end = start + fi.length - 1; loff_t i_size = i_size_read(inode); /* * Zero out the remaining part if this range straddles * i_size. * * Here we should only zero the range inside the folio, * not touch anything else. * * NOTE: i_size is exclusive while end is inclusive and * folio_contains() takes PAGE_SIZE units. */ if (folio_contains(folio, i_size >> PAGE_SHIFT) && i_size <= end) { u32 zero_start = max(offset_in_folio(folio, i_size), offset_in_folio(folio, start)); u32 zero_len = offset_in_folio(folio, end) + 1 - zero_start; folio_zero_range(folio, zero_start, zero_len); } } /* Update page status and unlock. 
*/ end_folio_read(folio, uptodate, start, fi.length); } bio_put(bio); } /* * Populate every free slot in a provided array with folios using GFP_NOFS. * * @nr_folios: number of folios to allocate * @folio_array: the array to fill with folios; any existing non-NULL entries in * the array will be skipped * * Return: 0 if all folios were able to be allocated; * -ENOMEM otherwise, the partially allocated folios would be freed and * the array slots zeroed */ int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array) { for (int i = 0; i < nr_folios; i++) { if (folio_array[i]) continue; folio_array[i] = folio_alloc(GFP_NOFS, 0); if (!folio_array[i]) goto error; } return 0; error: for (int i = 0; i < nr_folios; i++) { if (folio_array[i]) folio_put(folio_array[i]); } return -ENOMEM; } /* * Populate every free slot in a provided array with pages, using GFP_NOFS. * * @nr_pages: number of pages to allocate * @page_array: the array to fill with pages; any existing non-null entries in * the array will be skipped * @nofail: whether using __GFP_NOFAIL flag * * Return: 0 if all pages were able to be allocated; * -ENOMEM otherwise, the partially allocated pages would be freed and * the array slots zeroed */ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array, bool nofail) { const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS; unsigned int allocated; for (allocated = 0; allocated < nr_pages;) { unsigned int last = allocated; allocated = alloc_pages_bulk(gfp, nr_pages, page_array); if (unlikely(allocated == last)) { /* No progress, fail and do cleanup. */ for (int i = 0; i < allocated; i++) { __free_page(page_array[i]); page_array[i] = NULL; } return -ENOMEM; } } return 0; } /* * Populate needed folios for the extent buffer. * * For now, the folios populated are always in order 0 (aka, single page). */ static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail) { struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 }; int num_pages = num_extent_pages(eb); int ret; ret = btrfs_alloc_page_array(num_pages, page_array, nofail); if (ret < 0) return ret; for (int i = 0; i < num_pages; i++) eb->folios[i] = page_folio(page_array[i]); eb->folio_size = PAGE_SIZE; eb->folio_shift = PAGE_SHIFT; return 0; } static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl, u64 disk_bytenr, loff_t file_offset) { struct bio *bio = &bio_ctrl->bbio->bio; const sector_t sector = disk_bytenr >> SECTOR_SHIFT; if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) { /* * For compression, all IO should have its logical bytenr set * to the starting bytenr of the compressed extent. */ return bio->bi_iter.bi_sector == sector; } /* * To merge into a bio both the disk sector and the logical offset in * the file need to be contiguous. */ return bio_ctrl->next_file_offset == file_offset && bio_end_sector(bio) == sector; } static void alloc_new_bio(struct btrfs_inode *inode, struct btrfs_bio_ctrl *bio_ctrl, u64 disk_bytenr, u64 file_offset) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_bio *bbio; bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info, bio_ctrl->end_io_func, NULL); bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; bbio->bio.bi_write_hint = inode->vfs_inode.i_write_hint; bbio->inode = inode; bbio->file_offset = file_offset; bio_ctrl->bbio = bbio; bio_ctrl->len_to_oe_boundary = U32_MAX; bio_ctrl->next_file_offset = file_offset; /* Limit data write bios to the ordered boundary. 
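 *
 * Worked example with made-up numbers (not from the original comment):
 * for an ordered extent with file_offset == 0 and disk_num_bytes == 1M,
 * a bio starting at file_offset == 768K gets
 * len_to_oe_boundary = 1M - 768K = 256K, so at most 256K are added to
 * this bio before a new one has to be allocated.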
*/ if (bio_ctrl->wbc) { struct btrfs_ordered_extent *ordered; ordered = btrfs_lookup_ordered_extent(inode, file_offset); if (ordered) { bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX, ordered->file_offset + ordered->disk_num_bytes - file_offset); bbio->ordered = ordered; } /* * Pick the last added device to support cgroup writeback. For * multi-device file systems this means blk-cgroup policies have * to always be set on the last added/replaced device. * This is a bit odd but has been like that for a long time. */ bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev); wbc_init_bio(bio_ctrl->wbc, &bbio->bio); } } /* * @disk_bytenr: logical bytenr where the write will be * @page: page to add to the bio * @size: portion of page that we want to write to * @pg_offset: offset of the new bio or to check whether we are adding * a contiguous page to the previous one * * The will either add the page into the existing @bio_ctrl->bbio, or allocate a * new one in @bio_ctrl->bbio. * The mirror number for this IO should already be initizlied in * @bio_ctrl->mirror_num. */ static void submit_extent_folio(struct btrfs_bio_ctrl *bio_ctrl, u64 disk_bytenr, struct folio *folio, size_t size, unsigned long pg_offset) { struct btrfs_inode *inode = folio_to_inode(folio); loff_t file_offset = folio_pos(folio) + pg_offset; ASSERT(pg_offset + size <= folio_size(folio)); ASSERT(bio_ctrl->end_io_func); if (bio_ctrl->bbio && !btrfs_bio_is_contig(bio_ctrl, disk_bytenr, file_offset)) submit_one_bio(bio_ctrl); do { u32 len = size; /* Allocate new bio if needed */ if (!bio_ctrl->bbio) alloc_new_bio(inode, bio_ctrl, disk_bytenr, file_offset); /* Cap to the current ordered extent boundary if there is one. */ if (len > bio_ctrl->len_to_oe_boundary) { ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE); ASSERT(is_data_inode(inode)); len = bio_ctrl->len_to_oe_boundary; } if (!bio_add_folio(&bio_ctrl->bbio->bio, folio, len, pg_offset)) { /* bio full: move on to a new one */ submit_one_bio(bio_ctrl); continue; } bio_ctrl->next_file_offset += len; if (bio_ctrl->wbc) wbc_account_cgroup_owner(bio_ctrl->wbc, folio, len); size -= len; pg_offset += len; disk_bytenr += len; file_offset += len; /* * len_to_oe_boundary defaults to U32_MAX, which isn't folio or * sector aligned. alloc_new_bio() then sets it to the end of * our ordered extent for writes into zoned devices. * * When len_to_oe_boundary is tracking an ordered extent, we * trust the ordered extent code to align things properly, and * the check above to cap our write to the ordered extent * boundary is correct. * * When len_to_oe_boundary is U32_MAX, the cap above would * result in a 4095 byte IO for the last folio right before * we hit the bio limit of UINT_MAX. bio_add_folio() has all * the checks required to make sure we don't overflow the bio, * and we should just ignore len_to_oe_boundary completely * unless we're using it to track an ordered extent. * * It's pretty hard to make a bio sized U32_MAX, but it can * happen when the page cache is able to feed us contiguous * folios for large extents. */ if (bio_ctrl->len_to_oe_boundary != U32_MAX) bio_ctrl->len_to_oe_boundary -= len; /* Ordered extent boundary: move on to a new bio. 
*/ if (bio_ctrl->len_to_oe_boundary == 0) submit_one_bio(bio_ctrl); } while (size); } static int attach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio, struct btrfs_folio_state *prealloc) { struct btrfs_fs_info *fs_info = eb->fs_info; int ret = 0; /* * If the page is mapped to btree inode, we should hold the private * lock to prevent race. * For cloned or dummy extent buffers, their pages are not mapped and * will not race with any other ebs. */ if (folio->mapping) lockdep_assert_held(&folio->mapping->i_private_lock); if (!btrfs_meta_is_subpage(fs_info)) { if (!folio_test_private(folio)) folio_attach_private(folio, eb); else WARN_ON(folio_get_private(folio) != eb); return 0; } /* Already mapped, just free prealloc */ if (folio_test_private(folio)) { btrfs_free_folio_state(prealloc); return 0; } if (prealloc) /* Has preallocated memory for subpage */ folio_attach_private(folio, prealloc); else /* Do new allocation to attach subpage */ ret = btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); return ret; } int set_folio_extent_mapped(struct folio *folio) { struct btrfs_fs_info *fs_info; ASSERT(folio->mapping); if (folio_test_private(folio)) return 0; fs_info = folio_to_fs_info(folio); if (btrfs_is_subpage(fs_info, folio)) return btrfs_attach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA); folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE); return 0; } void clear_folio_extent_mapped(struct folio *folio) { struct btrfs_fs_info *fs_info; ASSERT(folio->mapping); if (!folio_test_private(folio)) return; fs_info = folio_to_fs_info(folio); if (btrfs_is_subpage(fs_info, folio)) return btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_DATA); folio_detach_private(folio); } static struct extent_map *get_extent_map(struct btrfs_inode *inode, struct folio *folio, u64 start, u64 len, struct extent_map **em_cached) { struct extent_map *em; ASSERT(em_cached); if (*em_cached) { em = *em_cached; if (btrfs_extent_map_in_tree(em) && start >= em->start && start < btrfs_extent_map_end(em)) { refcount_inc(&em->refs); return em; } btrfs_free_extent_map(em); *em_cached = NULL; } em = btrfs_get_extent(inode, folio, start, len); if (!IS_ERR(em)) { BUG_ON(*em_cached); refcount_inc(&em->refs); *em_cached = em; } return em; } static void btrfs_readahead_expand(struct readahead_control *ractl, const struct extent_map *em) { const u64 ra_pos = readahead_pos(ractl); const u64 ra_end = ra_pos + readahead_length(ractl); const u64 em_end = em->start + em->ram_bytes; /* No expansion for holes and inline extents. */ if (em->disk_bytenr > EXTENT_MAP_LAST_BYTE) return; ASSERT(em_end >= ra_pos, "extent_map %llu %llu ends before current readahead position %llu", em->start, em->len, ra_pos); if (em_end > ra_end) readahead_expand(ractl, ra_pos, em_end - ra_pos); } /* * basic readpage implementation. 
Locked extent state structs are inserted * into the tree that are removed when the IO is done (by the end_io * handlers) * XXX JDM: This needs looking at to ensure proper page locking * return 0 on success, otherwise return error */ static int btrfs_do_readpage(struct folio *folio, struct extent_map **em_cached, struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start) { struct inode *inode = folio->mapping->host; struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); u64 start = folio_pos(folio); const u64 end = start + folio_size(folio) - 1; u64 extent_offset; u64 last_byte = i_size_read(inode); struct extent_map *em; int ret = 0; const size_t blocksize = fs_info->sectorsize; ret = set_folio_extent_mapped(folio); if (ret < 0) { folio_unlock(folio); return ret; } if (folio_contains(folio, last_byte >> PAGE_SHIFT)) { size_t zero_offset = offset_in_folio(folio, last_byte); if (zero_offset) folio_zero_range(folio, zero_offset, folio_size(folio) - zero_offset); } bio_ctrl->end_io_func = end_bbio_data_read; begin_folio_read(fs_info, folio); for (u64 cur = start; cur <= end; cur += blocksize) { enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE; unsigned long pg_offset = offset_in_folio(folio, cur); bool force_bio_submit = false; u64 disk_bytenr; u64 block_start; ASSERT(IS_ALIGNED(cur, fs_info->sectorsize)); if (cur >= last_byte) { folio_zero_range(folio, pg_offset, end - cur + 1); end_folio_read(folio, true, cur, end - cur + 1); break; } if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) { end_folio_read(folio, true, cur, blocksize); continue; } em = get_extent_map(BTRFS_I(inode), folio, cur, end - cur + 1, em_cached); if (IS_ERR(em)) { end_folio_read(folio, false, cur, end + 1 - cur); return PTR_ERR(em); } extent_offset = cur - em->start; BUG_ON(btrfs_extent_map_end(em) <= cur); BUG_ON(end < cur); compress_type = btrfs_extent_map_compression(em); /* * Only expand readahead for extents which are already creating * the pages anyway in add_ra_bio_pages, which is compressed * extents in the non subpage case. */ if (bio_ctrl->ractl && !btrfs_is_subpage(fs_info, folio) && compress_type != BTRFS_COMPRESS_NONE) btrfs_readahead_expand(bio_ctrl->ractl, em); if (compress_type != BTRFS_COMPRESS_NONE) disk_bytenr = em->disk_bytenr; else disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset; if (em->flags & EXTENT_FLAG_PREALLOC) block_start = EXTENT_MAP_HOLE; else block_start = btrfs_extent_map_block_start(em); /* * If we have a file range that points to a compressed extent * and it's followed by a consecutive file range that points * to the same compressed extent (possibly with a different * offset and/or length, so it either points to the whole extent * or only part of it), we must make sure we do not submit a * single bio to populate the folios for the 2 ranges because * this makes the compressed extent read zero out the folios * belonging to the 2nd range. Imagine the following scenario: * * File layout * [0 - 8K] [8K - 24K] * | | * | | * points to extent X, points to extent X, * offset 4K, length of 8K offset 0, length 16K * * [extent X, compressed length = 4K uncompressed length = 16K] * * If the bio to read the compressed extent covers both ranges, * it will decompress extent X into the folios belonging to the * first range and then it will stop, zeroing out the remaining * folios that belong to the other range that points to extent X. * So here we make sure we submit 2 bios, one for the first * range and another one for the third range. 
Both will target * the same physical extent from disk, but we can't currently * make the compressed bio endio callback populate the folios * for both ranges because each compressed bio is tightly * coupled with a single extent map, and each range can have * an extent map with a different offset value relative to the * uncompressed data of our extent and different lengths. This * is a corner case so we prioritize correctness over * non-optimal behavior (submitting 2 bios for the same extent). */ if (compress_type != BTRFS_COMPRESS_NONE && prev_em_start && *prev_em_start != (u64)-1 && *prev_em_start != em->start) force_bio_submit = true; if (prev_em_start) *prev_em_start = em->start; btrfs_free_extent_map(em); em = NULL; /* we've found a hole, just zero and go on */ if (block_start == EXTENT_MAP_HOLE) { folio_zero_range(folio, pg_offset, blocksize); end_folio_read(folio, true, cur, blocksize); continue; } /* the get_extent function already copied into the folio */ if (block_start == EXTENT_MAP_INLINE) { end_folio_read(folio, true, cur, blocksize); continue; } if (bio_ctrl->compress_type != compress_type) { submit_one_bio(bio_ctrl); bio_ctrl->compress_type = compress_type; } if (force_bio_submit) submit_one_bio(bio_ctrl); submit_extent_folio(bio_ctrl, disk_bytenr, folio, blocksize, pg_offset); } return 0; } /* * Check if we can skip waiting the @ordered extent covering the block at @fileoff. * * @fileoff: Both input and output. * Input as the file offset where the check should start at. * Output as where the next check should start at, * if the function returns true. * * Return true if we can skip to @fileoff. The caller needs to check the new * @fileoff value to make sure it covers the full range, before skipping the * full OE. * * Return false if we must wait for the ordered extent. */ static bool can_skip_one_ordered_range(struct btrfs_inode *inode, struct btrfs_ordered_extent *ordered, u64 *fileoff) { const struct btrfs_fs_info *fs_info = inode->root->fs_info; struct folio *folio; const u32 blocksize = fs_info->sectorsize; u64 cur = *fileoff; bool ret; folio = filemap_get_folio(inode->vfs_inode.i_mapping, cur >> PAGE_SHIFT); /* * We should have locked the folio(s) for range [start, end], thus * there must be a folio and it must be locked. */ ASSERT(!IS_ERR(folio)); ASSERT(folio_test_locked(folio)); /* * There are several cases for the folio and OE combination: * * 1) Folio has no private flag * The OE has all its IO done but not yet finished, and folio got * invalidated. * * Have we have to wait for the OE to finish, as it may contain the * to-be-inserted data checksum. * Without the data checksum inserted into the csum tree, read will * just fail with missing csum. */ if (!folio_test_private(folio)) { ret = false; goto out; } /* * 2) The first block is DIRTY. * * This means the OE is created by some other folios whose file pos is * before this one. And since we are holding the folio lock, the writeback * of this folio cannot start. * * We must skip the whole OE, because it will never start until we * finished our folio read and unlocked the folio. */ if (btrfs_folio_test_dirty(fs_info, folio, cur, blocksize)) { u64 range_len = min(folio_end(folio), ordered->file_offset + ordered->num_bytes) - cur; ret = true; /* * At least inside the folio, all the remaining blocks should * also be dirty. */ ASSERT(btrfs_folio_test_dirty(fs_info, folio, cur, range_len)); *fileoff = ordered->file_offset + ordered->num_bytes; goto out; } /* * 3) The first block is uptodate. 
* * At least the first block can be skipped, but we are still not fully * sure. E.g. if the OE has some other folios in the range that cannot * be skipped. * So we return true and update @next_ret to the OE/folio boundary. */ if (btrfs_folio_test_uptodate(fs_info, folio, cur, blocksize)) { u64 range_len = min(folio_end(folio), ordered->file_offset + ordered->num_bytes) - cur; /* * The whole range to the OE end or folio boundary should also * be uptodate. */ ASSERT(btrfs_folio_test_uptodate(fs_info, folio, cur, range_len)); ret = true; *fileoff = cur + range_len; goto out; } /* * 4) The first block is not uptodate. * * This means the folio is invalidated after the writeback was finished, * but by some other operations (e.g. block aligned buffered write) the * folio is inserted into filemap. * Very much the same as case 1). */ ret = false; out: folio_put(folio); return ret; } static bool can_skip_ordered_extent(struct btrfs_inode *inode, struct btrfs_ordered_extent *ordered, u64 start, u64 end) { const u64 range_end = min(end, ordered->file_offset + ordered->num_bytes - 1); u64 cur = max(start, ordered->file_offset); while (cur < range_end) { bool can_skip; can_skip = can_skip_one_ordered_range(inode, ordered, &cur); if (!can_skip) return false; } return true; } /* * Locking helper to make sure we get a stable view of extent maps for the * involved range. * * This is for folio read paths (read and readahead), thus the involved range * should have all the folios locked. */ static void lock_extents_for_read(struct btrfs_inode *inode, u64 start, u64 end, struct extent_state **cached_state) { u64 cur_pos; /* Caller must provide a valid @cached_state. */ ASSERT(cached_state); /* The range must at least be page aligned, as all read paths are folio based. */ ASSERT(IS_ALIGNED(start, PAGE_SIZE)); ASSERT(IS_ALIGNED(end + 1, PAGE_SIZE)); again: btrfs_lock_extent(&inode->io_tree, start, end, cached_state); cur_pos = start; while (cur_pos < end) { struct btrfs_ordered_extent *ordered; ordered = btrfs_lookup_ordered_range(inode, cur_pos, end - cur_pos + 1); /* * No ordered extents in the range, and we hold the extent lock, * no one can modify the extent maps in the range, we're safe to return. */ if (!ordered) break; /* Check if we can skip waiting for the whole OE. */ if (can_skip_ordered_extent(inode, ordered, start, end)) { cur_pos = min(ordered->file_offset + ordered->num_bytes, end + 1); btrfs_put_ordered_extent(ordered); continue; } /* Now wait for the OE to finish. */ btrfs_unlock_extent(&inode->io_tree, start, end, cached_state); btrfs_start_ordered_extent_nowriteback(ordered, start, end + 1 - start); btrfs_put_ordered_extent(ordered); /* We have unlocked the whole range, restart from the beginning. */ goto again; } } int btrfs_read_folio(struct file *file, struct folio *folio) { struct btrfs_inode *inode = folio_to_inode(folio); const u64 start = folio_pos(folio); const u64 end = start + folio_size(folio) - 1; struct extent_state *cached_state = NULL; struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ }; struct extent_map *em_cached = NULL; int ret; lock_extents_for_read(inode, start, end, &cached_state); ret = btrfs_do_readpage(folio, &em_cached, &bio_ctrl, NULL); btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); btrfs_free_extent_map(em_cached); /* * If btrfs_do_readpage() failed we will want to submit the assembled * bio to do the cleanup. 
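 *
 * (Clarifying note, not part of the original comment: the assembled bio
 * may already carry sectors added before the failure, and submitting it
 * lets end_bbio_data_read() unlock those ranges instead of leaving them
 * locked forever.)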
*/ submit_one_bio(&bio_ctrl); return ret; } static void set_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap, u64 start, u32 len) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); const u64 folio_start = folio_pos(folio); unsigned int start_bit; unsigned int nbits; ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio)); start_bit = (start - folio_start) >> fs_info->sectorsize_bits; nbits = len >> fs_info->sectorsize_bits; ASSERT(bitmap_test_range_all_zero(delalloc_bitmap, start_bit, nbits)); bitmap_set(delalloc_bitmap, start_bit, nbits); } static bool find_next_delalloc_bitmap(struct folio *folio, unsigned long *delalloc_bitmap, u64 start, u64 *found_start, u32 *found_len) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); const u64 folio_start = folio_pos(folio); const unsigned int bitmap_size = btrfs_blocks_per_folio(fs_info, folio); unsigned int start_bit; unsigned int first_zero; unsigned int first_set; ASSERT(start >= folio_start && start < folio_start + folio_size(folio)); start_bit = (start - folio_start) >> fs_info->sectorsize_bits; first_set = find_next_bit(delalloc_bitmap, bitmap_size, start_bit); if (first_set >= bitmap_size) return false; *found_start = folio_start + (first_set << fs_info->sectorsize_bits); first_zero = find_next_zero_bit(delalloc_bitmap, bitmap_size, first_set); *found_len = (first_zero - first_set) << fs_info->sectorsize_bits; return true; } /* * Do all of the delayed allocation setup. * * Return >0 if all the dirty blocks are submitted async (compression) or inlined. * The @folio should no longer be touched (treat it as already unlocked). * * Return 0 if there is still dirty block that needs to be submitted through * extent_writepage_io(). * bio_ctrl->submit_bitmap will indicate which blocks of the folio should be * submitted, and @folio is still kept locked. * * Return <0 if there is any error hit. * Any allocated ordered extent range covering this folio will be marked * finished (IOERR), and @folio is still kept locked. */ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl) { struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode); struct writeback_control *wbc = bio_ctrl->wbc; const bool is_subpage = btrfs_is_subpage(fs_info, folio); const u64 page_start = folio_pos(folio); const u64 page_end = page_start + folio_size(folio) - 1; const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); unsigned long delalloc_bitmap = 0; /* * Save the last found delalloc end. As the delalloc end can go beyond * page boundary, thus we cannot rely on subpage bitmap to locate the * last delalloc end. */ u64 last_delalloc_end = 0; /* * The range end (exclusive) of the last successfully finished delalloc * range. * Any range covered by ordered extent must either be manually marked * finished (error handling), or has IO submitted (and finish the * ordered extent normally). * * This records the end of ordered extent cleanup if we hit an error. */ u64 last_finished_delalloc_end = page_start; u64 delalloc_start = page_start; u64 delalloc_end = page_end; u64 delalloc_to_write = 0; int ret = 0; int bit; /* Save the dirty bitmap as our submission bitmap will be a subset of it. 
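 *
 * Illustrative example (added, assuming 4K blocks in a 16K folio, so
 * blocks_per_folio == 4): if only blocks 0 and 2 are dirty, the dirty
 * bitmap is 0b0101; submit_bitmap starts out as that value and can only
 * lose bits later (e.g. ranges handed off to compression or inline
 * extents), never gain them.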
*/ if (btrfs_is_subpage(fs_info, folio)) { ASSERT(blocks_per_folio > 1); btrfs_get_subpage_dirty_bitmap(fs_info, folio, &bio_ctrl->submit_bitmap); } else { bio_ctrl->submit_bitmap = 1; } for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) { u64 start = page_start + (bit << fs_info->sectorsize_bits); btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize); } /* Lock all (subpage) delalloc ranges inside the folio first. */ while (delalloc_start < page_end) { delalloc_end = page_end; if (!find_lock_delalloc_range(&inode->vfs_inode, folio, &delalloc_start, &delalloc_end)) { delalloc_start = delalloc_end + 1; continue; } set_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start, min(delalloc_end, page_end) + 1 - delalloc_start); last_delalloc_end = delalloc_end; delalloc_start = delalloc_end + 1; } delalloc_start = page_start; if (!last_delalloc_end) goto out; /* Run the delalloc ranges for the above locked ranges. */ while (delalloc_start < page_end) { u64 found_start; u32 found_len; bool found; if (!is_subpage) { /* * For non-subpage case, the found delalloc range must * cover this folio and there must be only one locked * delalloc range. */ found_start = page_start; found_len = last_delalloc_end + 1 - found_start; found = true; } else { found = find_next_delalloc_bitmap(folio, &delalloc_bitmap, delalloc_start, &found_start, &found_len); } if (!found) break; /* * The subpage range covers the last sector, the delalloc range may * end beyond the folio boundary, use the saved delalloc_end * instead. */ if (found_start + found_len >= page_end) found_len = last_delalloc_end + 1 - found_start; if (ret >= 0) { /* * Some delalloc range may be created by previous folios. * Thus we still need to clean up this range during error * handling. */ last_finished_delalloc_end = found_start; /* No errors hit so far, run the current delalloc range. */ ret = btrfs_run_delalloc_range(inode, folio, found_start, found_start + found_len - 1, wbc); if (ret >= 0) last_finished_delalloc_end = found_start + found_len; if (unlikely(ret < 0)) btrfs_err_rl(fs_info, "failed to run delalloc range, root=%lld ino=%llu folio=%llu submit_bitmap=%*pbl start=%llu len=%u: %d", btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(folio), blocks_per_folio, &bio_ctrl->submit_bitmap, found_start, found_len, ret); } else { /* * We've hit an error during previous delalloc range, * have to cleanup the remaining locked ranges. */ btrfs_unlock_extent(&inode->io_tree, found_start, found_start + found_len - 1, NULL); unlock_delalloc_folio(&inode->vfs_inode, folio, found_start, found_start + found_len - 1); } /* * We have some ranges that's going to be submitted asynchronously * (compression or inline). These range have their own control * on when to unlock the pages. We should not touch them * anymore, so clear the range from the submission bitmap. */ if (ret > 0) { unsigned int start_bit = (found_start - page_start) >> fs_info->sectorsize_bits; unsigned int end_bit = (min(page_end + 1, found_start + found_len) - page_start) >> fs_info->sectorsize_bits; bitmap_clear(&bio_ctrl->submit_bitmap, start_bit, end_bit - start_bit); } /* * Above btrfs_run_delalloc_range() may have unlocked the folio, * thus for the last range, we cannot touch the folio anymore. */ if (found_start + found_len >= last_delalloc_end + 1) break; delalloc_start = found_start + found_len; } /* * It's possible we had some ordered extents created before we hit * an error, cleanup non-async successfully created delalloc ranges. 
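 *
 * Worked example with made-up numbers: with 4K blocks, a 16K folio at
 * page_start == 64K, and a failure after delalloc successfully ran for
 * the first 8K of the folio (last_finished_delalloc_end == 72K), the
 * bitmap_size below is min((72K - 64K) >> sectorsize_bits, 4) == 2, so
 * only the first two set bits of submit_bitmap get their ordered
 * extents force-finished.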
*/ if (unlikely(ret < 0)) { unsigned int bitmap_size = min( (last_finished_delalloc_end - page_start) >> fs_info->sectorsize_bits, blocks_per_folio); for_each_set_bit(bit, &bio_ctrl->submit_bitmap, bitmap_size) btrfs_mark_ordered_io_finished(inode, folio, page_start + (bit << fs_info->sectorsize_bits), fs_info->sectorsize, false); return ret; } out: if (last_delalloc_end) delalloc_end = last_delalloc_end; else delalloc_end = page_end; /* * delalloc_end is already one less than the total length, so * we don't subtract one from PAGE_SIZE. */ delalloc_to_write += DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); /* * If all ranges are submitted asynchronously, we just need to account * for them here. */ if (bitmap_empty(&bio_ctrl->submit_bitmap, blocks_per_folio)) { wbc->nr_to_write -= delalloc_to_write; return 1; } if (wbc->nr_to_write < delalloc_to_write) { int thresh = 8192; if (delalloc_to_write < thresh * 2) thresh = delalloc_to_write; wbc->nr_to_write = min_t(u64, delalloc_to_write, thresh); } return 0; } /* * Return 0 if we have submitted or queued the sector for submission. * Return <0 for critical errors, and the sector will have its dirty flag cleared. * * Caller should make sure filepos < i_size and handle filepos >= i_size case. */ static int submit_one_sector(struct btrfs_inode *inode, struct folio *folio, u64 filepos, struct btrfs_bio_ctrl *bio_ctrl, loff_t i_size) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map *em; u64 block_start; u64 disk_bytenr; u64 extent_offset; u64 em_end; const u32 sectorsize = fs_info->sectorsize; ASSERT(IS_ALIGNED(filepos, sectorsize)); /* @filepos >= i_size case should be handled by the caller. */ ASSERT(filepos < i_size); em = btrfs_get_extent(inode, NULL, filepos, sectorsize); if (IS_ERR(em)) { /* * When submission failed, we should still clear the folio dirty. * Or the folio will be written back again but without any * ordered extent. */ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize); btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize); btrfs_folio_clear_writeback(fs_info, folio, filepos, sectorsize); return PTR_ERR(em); } extent_offset = filepos - em->start; em_end = btrfs_extent_map_end(em); ASSERT(filepos <= em_end); ASSERT(IS_ALIGNED(em->start, sectorsize)); ASSERT(IS_ALIGNED(em->len, sectorsize)); block_start = btrfs_extent_map_block_start(em); disk_bytenr = btrfs_extent_map_block_start(em) + extent_offset; ASSERT(!btrfs_extent_map_is_compressed(em)); ASSERT(block_start != EXTENT_MAP_HOLE); ASSERT(block_start != EXTENT_MAP_INLINE); btrfs_free_extent_map(em); em = NULL; /* * Although the PageDirty bit is cleared before entering this * function, subpage dirty bit is not cleared. * So clear subpage dirty bit here so next time we won't submit * a folio for a range already written to disk. */ btrfs_folio_clear_dirty(fs_info, folio, filepos, sectorsize); btrfs_folio_set_writeback(fs_info, folio, filepos, sectorsize); /* * Above call should set the whole folio with writeback flag, even * just for a single subpage sector. * As long as the folio is properly locked and the range is correct, * we should always get the folio with writeback flag. */ ASSERT(folio_test_writeback(folio)); submit_extent_folio(bio_ctrl, disk_bytenr, folio, sectorsize, filepos - folio_pos(folio)); return 0; } /* * Helper for extent_writepage(). This calls the writepage start hooks, * and does the loop to map the page into extents and bios. 
* * We return 1 if the IO is started and the page is unlocked, * 0 if all went well (page still locked) * < 0 if there were errors (page still locked) */ static noinline_for_stack int extent_writepage_io(struct btrfs_inode *inode, struct folio *folio, u64 start, u32 len, struct btrfs_bio_ctrl *bio_ctrl, loff_t i_size) { struct btrfs_fs_info *fs_info = inode->root->fs_info; unsigned long range_bitmap = 0; bool submitted_io = false; bool error = false; const u64 folio_start = folio_pos(folio); const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); u64 cur; int bit; int ret = 0; ASSERT(start >= folio_start && start + len <= folio_start + folio_size(folio)); ret = btrfs_writepage_cow_fixup(folio); if (ret == -EAGAIN) { /* Fixup worker will requeue */ folio_redirty_for_writepage(bio_ctrl->wbc, folio); folio_unlock(folio); return 1; } if (ret < 0) { btrfs_folio_clear_dirty(fs_info, folio, start, len); btrfs_folio_set_writeback(fs_info, folio, start, len); btrfs_folio_clear_writeback(fs_info, folio, start, len); return ret; } for (cur = start; cur < start + len; cur += fs_info->sectorsize) set_bit((cur - folio_start) >> fs_info->sectorsize_bits, &range_bitmap); bitmap_and(&bio_ctrl->submit_bitmap, &bio_ctrl->submit_bitmap, &range_bitmap, blocks_per_folio); bio_ctrl->end_io_func = end_bbio_data_write; for_each_set_bit(bit, &bio_ctrl->submit_bitmap, blocks_per_folio) { cur = folio_pos(folio) + (bit << fs_info->sectorsize_bits); if (cur >= i_size) { btrfs_mark_ordered_io_finished(inode, folio, cur, start + len - cur, true); /* * This range is beyond i_size, thus we don't need to * bother writing back. * But we still need to clear the dirty subpage bit, or * the next time the folio gets dirtied, we will try to * writeback the sectors with subpage dirty bits, * causing writeback without ordered extent. */ btrfs_folio_clear_dirty(fs_info, folio, cur, start + len - cur); break; } ret = submit_one_sector(inode, folio, cur, bio_ctrl, i_size); if (unlikely(ret < 0)) { /* * bio_ctrl may contain a bio crossing several folios. * Submit it immediately so that the bio has a chance * to finish normally, other than marked as error. */ submit_one_bio(bio_ctrl); /* * Failed to grab the extent map which should be very rare. * Since there is no bio submitted to finish the ordered * extent, we have to manually finish this sector. */ btrfs_mark_ordered_io_finished(inode, folio, cur, fs_info->sectorsize, false); error = true; continue; } submitted_io = true; } /* * If we didn't submitted any sector (>= i_size), folio dirty get * cleared but PAGECACHE_TAG_DIRTY is not cleared (only cleared * by folio_start_writeback() if the folio is not dirty). * * Here we set writeback and clear for the range. If the full folio * is no longer dirty then we clear the PAGECACHE_TAG_DIRTY tag. * * If we hit any error, the corresponding sector will have its dirty * flag cleared and writeback finished, thus no need to handle the error case. */ if (!submitted_io && !error) { btrfs_folio_set_writeback(fs_info, folio, start, len); btrfs_folio_clear_writeback(fs_info, folio, start, len); } return ret; } /* * the writepage semantics are similar to regular writepage. extent * records are inserted to lock ranges in the tree, and as dirty areas * are found, they are marked writeback. Then the lock bits are removed * and the end_io handler clears the writeback ranges * * Return 0 if everything goes well. * Return <0 for error. 
*/ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl) { struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); struct btrfs_fs_info *fs_info = inode->root->fs_info; int ret; size_t pg_offset; loff_t i_size = i_size_read(&inode->vfs_inode); const pgoff_t end_index = i_size >> PAGE_SHIFT; const unsigned int blocks_per_folio = btrfs_blocks_per_folio(fs_info, folio); trace_extent_writepage(folio, &inode->vfs_inode, bio_ctrl->wbc); WARN_ON(!folio_test_locked(folio)); pg_offset = offset_in_folio(folio, i_size); if (folio->index > end_index || (folio->index == end_index && !pg_offset)) { folio_invalidate(folio, 0, folio_size(folio)); folio_unlock(folio); return 0; } if (folio_contains(folio, end_index)) folio_zero_range(folio, pg_offset, folio_size(folio) - pg_offset); /* * Default to unlock the whole folio. * The proper bitmap can only be initialized until writepage_delalloc(). */ bio_ctrl->submit_bitmap = (unsigned long)-1; /* * If the page is dirty but without private set, it's marked dirty * without informing the fs. * Nowadays that is a bug, since the introduction of * pin_user_pages*(). * * So here we check if the page has private set to rule out such * case. * But we also have a long history of relying on the COW fixup, * so here we only enable this check for experimental builds until * we're sure it's safe. */ if (IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL) && unlikely(!folio_test_private(folio))) { WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); btrfs_err_rl(fs_info, "root %lld ino %llu folio %llu is marked dirty without notifying the fs", btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(folio)); ret = -EUCLEAN; goto done; } ret = set_folio_extent_mapped(folio); if (ret < 0) goto done; ret = writepage_delalloc(inode, folio, bio_ctrl); if (ret == 1) return 0; if (ret) goto done; ret = extent_writepage_io(inode, folio, folio_pos(folio), folio_size(folio), bio_ctrl, i_size); if (ret == 1) return 0; if (ret < 0) btrfs_err_rl(fs_info, "failed to submit blocks, root=%lld inode=%llu folio=%llu submit_bitmap=%*pbl: %d", btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(folio), blocks_per_folio, &bio_ctrl->submit_bitmap, ret); bio_ctrl->wbc->nr_to_write--; done: if (ret < 0) mapping_set_error(folio->mapping, ret); /* * Only unlock ranges that are submitted. As there can be some async * submitted ranges inside the folio. */ btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap); ASSERT(ret <= 0); return ret; } /* * Lock extent buffer status and pages for writeback. * * Return %false if the extent buffer doesn't need to be submitted (e.g. the * extent buffer is not dirty) * Return %true is the extent buffer is submitted to bio. */ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, struct writeback_control *wbc) { struct btrfs_fs_info *fs_info = eb->fs_info; bool ret = false; btrfs_tree_lock(eb); while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { btrfs_tree_unlock(eb); if (wbc->sync_mode != WB_SYNC_ALL) return false; wait_on_extent_buffer_writeback(eb); btrfs_tree_lock(eb); } /* * We need to do this to prevent races in people who check if the eb is * under IO since we can end up having no IO bits set for a short period * of time. 
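 *
 * (Clarifying note, not part of the original comment: without
 * eb->refs_lock an observer could see the short window after
 * EXTENT_BUFFER_DIRTY has been cleared but before
 * EXTENT_BUFFER_WRITEBACK is set, conclude the buffer is idle and
 * release it while we are about to write it out.)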
*/ spin_lock(&eb->refs_lock); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); spin_unlock(&eb->refs_lock); xas_lock_irqsave(&xas, flags); xas_load(&xas); xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK); xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY); xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE); xas_unlock_irqrestore(&xas, flags); btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, fs_info->dirty_metadata_batch); ret = true; } else { spin_unlock(&eb->refs_lock); } btrfs_tree_unlock(eb); return ret; } static void set_btree_ioerr(struct extent_buffer *eb) { struct btrfs_fs_info *fs_info = eb->fs_info; set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); /* * A read may stumble upon this buffer later, make sure that it gets an * error and knows there was an error. */ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); /* * We need to set the mapping with the io error as well because a write * error will flip the file system readonly, and then syncfs() will * return a 0 because we are readonly if we don't modify the err seq for * the superblock. */ mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); /* * If writeback for a btree extent that doesn't belong to a log tree * failed, increment the counter transaction->eb_write_errors. * We do this because while the transaction is running and before it's * committing (when we call filemap_fdata[write|wait]_range against * the btree inode), we might have * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it * returns an error or an error happens during writeback, when we're * committing the transaction we wouldn't know about it, since the pages * can be no longer dirty nor marked anymore for writeback (if a * subsequent modification to the extent buffer didn't happen before the * transaction commit), which makes filemap_fdata[write|wait]_range not * able to find the pages which contain errors at transaction * commit time. So if this happens we must abort the transaction, * otherwise we commit a super block with btree roots that point to * btree nodes/leafs whose content on disk is invalid - either garbage * or the content of some node/leaf from a past generation that got * cowed or deleted and is no longer valid. * * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would * not be enough - we need to distinguish between log tree extents vs * non-log tree extents, and the next filemap_fdatawait_range() call * will catch and clear such errors in the mapping - and that call might * be from a log sync and not from a transaction commit. Also, checking * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is * not done and would not be reliable - the eb might have been released * from memory and reading it back again means that flag would not be * set (since it's a runtime flag, not persisted on disk). * * Using the flags below in the btree inode also makes us achieve the * goal of AS_EIO/AS_ENOSPC when writepages() returns success, started * writeback for all dirty pages and before filemap_fdatawait_range() * is called, the writeback for all dirty pages had already finished * with errors - because we were not using AS_EIO/AS_ENOSPC, * filemap_fdatawait_range() would return success, as it could not know * that writeback errors happened (the pages were no longer tagged for * writeback). 
*/ switch (eb->log_index) { case -1: set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags); break; case 0: set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags); break; case 1: set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags); break; default: BUG(); /* unexpected, logic error */ } } static void buffer_tree_set_mark(const struct extent_buffer *eb, xa_mark_t mark) { struct btrfs_fs_info *fs_info = eb->fs_info; XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; xas_lock_irqsave(&xas, flags); xas_load(&xas); xas_set_mark(&xas, mark); xas_unlock_irqrestore(&xas, flags); } static void buffer_tree_clear_mark(const struct extent_buffer *eb, xa_mark_t mark) { struct btrfs_fs_info *fs_info = eb->fs_info; XA_STATE(xas, &fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits); unsigned long flags; xas_lock_irqsave(&xas, flags); xas_load(&xas); xas_clear_mark(&xas, mark); xas_unlock_irqrestore(&xas, flags); } static void buffer_tree_tag_for_writeback(struct btrfs_fs_info *fs_info, unsigned long start, unsigned long end) { XA_STATE(xas, &fs_info->buffer_tree, start); unsigned int tagged = 0; void *eb; xas_lock_irq(&xas); xas_for_each_marked(&xas, eb, end, PAGECACHE_TAG_DIRTY) { xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE); if (++tagged % XA_CHECK_SCHED) continue; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); } struct eb_batch { unsigned int nr; unsigned int cur; struct extent_buffer *ebs[PAGEVEC_SIZE]; }; static inline bool eb_batch_add(struct eb_batch *batch, struct extent_buffer *eb) { batch->ebs[batch->nr++] = eb; return (batch->nr < PAGEVEC_SIZE); } static inline void eb_batch_init(struct eb_batch *batch) { batch->nr = 0; batch->cur = 0; } static inline struct extent_buffer *eb_batch_next(struct eb_batch *batch) { if (batch->cur >= batch->nr) return NULL; return batch->ebs[batch->cur++]; } static inline void eb_batch_release(struct eb_batch *batch) { for (unsigned int i = 0; i < batch->nr; i++) free_extent_buffer(batch->ebs[i]); eb_batch_init(batch); } static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned long max, xa_mark_t mark) { struct extent_buffer *eb; retry: eb = xas_find_marked(xas, max, mark); if (xas_retry(xas, eb)) goto retry; if (!eb) return NULL; if (!refcount_inc_not_zero(&eb->refs)) { xas_reset(xas); goto retry; } if (unlikely(eb != xas_reload(xas))) { free_extent_buffer(eb); xas_reset(xas); goto retry; } return eb; } static unsigned int buffer_tree_get_ebs_tag(struct btrfs_fs_info *fs_info, unsigned long *start, unsigned long end, xa_mark_t tag, struct eb_batch *batch) { XA_STATE(xas, &fs_info->buffer_tree, *start); struct extent_buffer *eb; rcu_read_lock(); while ((eb = find_get_eb(&xas, end, tag)) != NULL) { if (!eb_batch_add(batch, eb)) { *start = ((eb->start + eb->len) >> fs_info->nodesize_bits); goto out; } } if (end == ULONG_MAX) *start = ULONG_MAX; else *start = end + 1; out: rcu_read_unlock(); return batch->nr; } /* * The endio specific version which won't touch any unsafe spinlock in endio * context. 
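 *
 * (Clarifying note, not part of the original comment: the lookup only
 * takes rcu_read_lock() and uses refcount_inc_not_zero(), so it never
 * sleeps or takes a spinlock and is safe from bio end_io context.)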
*/ static struct extent_buffer *find_extent_buffer_nolock( struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb; unsigned long index = (start >> fs_info->nodesize_bits); rcu_read_lock(); eb = xa_load(&fs_info->buffer_tree, index); if (eb && !refcount_inc_not_zero(&eb->refs)) eb = NULL; rcu_read_unlock(); return eb; } static void end_bbio_meta_write(struct btrfs_bio *bbio) { struct extent_buffer *eb = bbio->private; struct folio_iter fi; if (bbio->bio.bi_status != BLK_STS_OK) set_btree_ioerr(eb); bio_for_each_folio_all(fi, &bbio->bio) { btrfs_meta_folio_clear_writeback(fi.folio, eb); } buffer_tree_clear_mark(eb, PAGECACHE_TAG_WRITEBACK); clear_and_wake_up_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); bio_put(&bbio->bio); } static void prepare_eb_write(struct extent_buffer *eb) { u32 nritems; unsigned long start; unsigned long end; clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); /* Set btree blocks beyond nritems with 0 to avoid stale content */ nritems = btrfs_header_nritems(eb); if (btrfs_header_level(eb) > 0) { end = btrfs_node_key_ptr_offset(eb, nritems); memzero_extent_buffer(eb, end, eb->len - end); } else { /* * Leaf: * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 */ start = btrfs_item_nr_offset(eb, nritems); end = btrfs_item_nr_offset(eb, 0); if (nritems == 0) end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); else end += btrfs_item_offset(eb, nritems - 1); memzero_extent_buffer(eb, start, end - start); } } static noinline_for_stack void write_one_eb(struct extent_buffer *eb, struct writeback_control *wbc) { struct btrfs_fs_info *fs_info = eb->fs_info; struct btrfs_bio *bbio; prepare_eb_write(eb); bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), eb->fs_info, end_bbio_meta_write, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev); wbc_init_bio(wbc, &bbio->bio); bbio->inode = BTRFS_I(eb->fs_info->btree_inode); bbio->file_offset = eb->start; for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; u64 range_start = max_t(u64, eb->start, folio_pos(folio)); u32 range_len = min_t(u64, folio_end(folio), eb->start + eb->len) - range_start; folio_lock(folio); btrfs_meta_folio_clear_dirty(folio, eb); btrfs_meta_folio_set_writeback(folio, eb); if (!folio_test_dirty(folio)) wbc->nr_to_write -= folio_nr_pages(folio); bio_add_folio_nofail(&bbio->bio, folio, range_len, offset_in_folio(folio, range_start)); wbc_account_cgroup_owner(wbc, folio, range_len); folio_unlock(folio); } btrfs_submit_bbio(bbio, 0); } /* * Wait for all eb writeback in the given range to finish. * * @fs_info: The fs_info for this file system. * @start: The offset of the range to start waiting on writeback. * @end: The end of the range, inclusive. This is meant to be used in * conjuction with wait_marked_extents, so this will usually be * the_next_eb->start - 1. 
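 *
 * Illustrative call (added for clarity, hypothetical caller): waiting on
 * a single eb that was just submitted could look like
 *
 *	btrfs_btree_wait_writeback_range(fs_info, eb->start,
 *					 eb->start + eb->len - 1);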
*/ void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end) { struct eb_batch batch; unsigned long start_index = (start >> fs_info->nodesize_bits); unsigned long end_index = (end >> fs_info->nodesize_bits); eb_batch_init(&batch); while (start_index <= end_index) { struct extent_buffer *eb; unsigned int nr_ebs; nr_ebs = buffer_tree_get_ebs_tag(fs_info, &start_index, end_index, PAGECACHE_TAG_WRITEBACK, &batch); if (!nr_ebs) break; while ((eb = eb_batch_next(&batch)) != NULL) wait_on_extent_buffer_writeback(eb); eb_batch_release(&batch); cond_resched(); } } int btree_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc) { struct btrfs_eb_write_context ctx = { .wbc = wbc }; struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); int ret = 0; int done = 0; int nr_to_write_done = 0; struct eb_batch batch; unsigned int nr_ebs; unsigned long index; unsigned long end; int scanned = 0; xa_mark_t tag; eb_batch_init(&batch); if (wbc->range_cyclic) { index = ((mapping->writeback_index << PAGE_SHIFT) >> fs_info->nodesize_bits); end = -1; /* * Start from the beginning does not need to cycle over the * range, mark it as scanned. */ scanned = (index == 0); } else { index = (wbc->range_start >> fs_info->nodesize_bits); end = (wbc->range_end >> fs_info->nodesize_bits); scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; btrfs_zoned_meta_io_lock(fs_info); retry: if (wbc->sync_mode == WB_SYNC_ALL) buffer_tree_tag_for_writeback(fs_info, index, end); while (!done && !nr_to_write_done && (index <= end) && (nr_ebs = buffer_tree_get_ebs_tag(fs_info, &index, end, tag, &batch))) { struct extent_buffer *eb; while ((eb = eb_batch_next(&batch)) != NULL) { ctx.eb = eb; ret = btrfs_check_meta_write_pointer(eb->fs_info, &ctx); if (ret) { if (ret == -EBUSY) ret = 0; if (ret) { done = 1; break; } continue; } if (!lock_extent_buffer_for_io(eb, wbc)) continue; /* Implies write in zoned mode. */ if (ctx.zoned_bg) { /* Mark the last eb in the block group. */ btrfs_schedule_zone_finish_bg(ctx.zoned_bg, eb); ctx.zoned_bg->meta_write_pointer += eb->len; } write_one_eb(eb, wbc); } nr_to_write_done = (wbc->nr_to_write <= 0); eb_batch_release(&batch); cond_resched(); } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = 1; index = 0; goto retry; } /* * If something went wrong, don't allow any metadata write bio to be * submitted. * * This would prevent use-after-free if we had dirty pages not * cleaned up, which can still happen by fuzzed images. * * - Bad extent tree * Allowing existing tree block to be allocated for other trees. * * - Log tree operations * Exiting tree blocks get allocated to log tree, bumps its * generation, then get cleaned in tree re-balance. * Such tree block will not be written back, since it's clean, * thus no WRITTEN flag set. * And after log writes back, this tree block is not traced by * any dirty extent_io_tree. * * - Offending tree block gets re-dirtied from its original owner * Since it has bumped generation, no WRITTEN flag, it can be * reused without COWing. This tree block will not be traced * by btrfs_transaction::dirty_pages. * * Now such dirty tree block will not be cleaned by any dirty * extent io tree. Thus we don't want to submit such wild eb * if the fs already has error. * * We can get ret > 0 from submit_extent_folio() indicating how many ebs * were submitted. 
Reset it to 0 to avoid false alerts for the caller. */ if (ret > 0) ret = 0; if (!ret && BTRFS_FS_ERROR(fs_info)) ret = -EROFS; if (ctx.zoned_bg) btrfs_put_block_group(ctx.zoned_bg); btrfs_zoned_meta_io_unlock(fs_info); return ret; } /* * Walk the list of dirty pages of the given address space and write all of them. * * @mapping: address space structure to write * @wbc: subtract the number of written pages from *@wbc->nr_to_write * @bio_ctrl: holds context for the write, namely the bio * * If a page is already under I/O, write_cache_pages() skips it, even * if it's dirty. This is desirable behaviour for memory-cleaning writeback, * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() * and msync() need to guarantee that all the data which was dirty at the time * the call was made get new I/O started against them. If wbc->sync_mode is * WB_SYNC_ALL then we were called for data integrity and we must wait for * existing IO to complete. */ static int extent_write_cache_pages(struct address_space *mapping, struct btrfs_bio_ctrl *bio_ctrl) { struct writeback_control *wbc = bio_ctrl->wbc; struct inode *inode = mapping->host; int ret = 0; int done = 0; int nr_to_write_done = 0; struct folio_batch fbatch; unsigned int nr_folios; pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; int range_whole = 0; int scanned = 0; xa_mark_t tag; /* * We have to hold onto the inode so that ordered extents can do their * work when the IO finishes. The alternative to this is failing to add * an ordered extent if the igrab() fails there and that is a huge pain * to deal with, so instead just hold onto the inode throughout the * writepages operation. If it fails here we are freeing up the inode * anyway and we'd rather not waste our time writing out stuff that is * going to be truncated anyway. */ if (!igrab(inode)) return 0; folio_batch_init(&fbatch); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; /* * Start from the beginning does not need to cycle over the * range, mark it as scanned. */ scanned = (index == 0); } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; scanned = 1; } /* * We do the tagged writepage as long as the snapshot flush bit is set * and we are the first one who do the filemap_flush() on this inode. * * The nr_to_write == LONG_MAX is needed to make sure other flushers do * not race in and drop the bit. */ if (range_whole && wbc->nr_to_write == LONG_MAX && test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH, &BTRFS_I(inode)->runtime_flags)) wbc->tagged_writepages = 1; if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag = PAGECACHE_TAG_TOWRITE; else tag = PAGECACHE_TAG_DIRTY; retry: if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) tag_pages_for_writeback(mapping, index, end); done_index = index; while (!done && !nr_to_write_done && (index <= end) && (nr_folios = filemap_get_folios_tag(mapping, &index, end, tag, &fbatch))) { unsigned i; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; done_index = folio_next_index(folio); /* * At this point we hold neither the i_pages lock nor * the folio lock: the folio may be truncated or * invalidated (changing folio->mapping to NULL). 
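* The folio_trylock()/folio_lock() below plus the folio->mapping and dirty rechecks handle that case: a folio whose mapping changed, or that is no longer dirty, is simply skipped.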
*/ if (!folio_trylock(folio)) { submit_write_bio(bio_ctrl, 0); folio_lock(folio); } if (unlikely(folio->mapping != mapping)) { folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* Someone wrote it for us. */ folio_unlock(folio); continue; } /* * For subpage case, compression can lead to mixed * writeback and dirty flags, e.g: * 0 32K 64K 96K 128K * | |//////||/////| |//| * * In above case, [32K, 96K) is asynchronously submitted * for compression, and [124K, 128K) needs to be written back. * * If we didn't wait wrtiteback for page 64K, [128K, 128K) * won't be submitted as the page still has writeback flag * and will be skipped in the next check. * * This mixed writeback and dirty case is only possible for * subpage case. * * TODO: Remove this check after migrating compression to * regular submission. */ if (wbc->sync_mode != WB_SYNC_NONE || btrfs_is_subpage(inode_to_fs_info(inode), folio)) { if (folio_test_writeback(folio)) submit_write_bio(bio_ctrl, 0); folio_wait_writeback(folio); } if (folio_test_writeback(folio) || !folio_clear_dirty_for_io(folio)) { folio_unlock(folio); continue; } ret = extent_writepage(folio, bio_ctrl); if (ret < 0) { done = 1; break; } /* * The filesystem may choose to bump up nr_to_write. * We have to make sure to honor the new nr_to_write * at any time. */ nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && wbc->nr_to_write <= 0); } folio_batch_release(&fbatch); cond_resched(); } if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file */ scanned = 1; index = 0; /* * If we're looping we could run into a page that is locked by a * writer and that writer could be waiting on writeback for a * page in our current bio, and thus deadlock, so flush the * write bio here. */ submit_write_bio(bio_ctrl, 0); goto retry; } if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) mapping->writeback_index = done_index; btrfs_add_delayed_iput(BTRFS_I(inode)); return ret; } /* * Submit the pages in the range to bio for call sites which delalloc range has * already been ran (aka, ordered extent inserted) and all pages are still * locked. */ void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio, u64 start, u64 end, struct writeback_control *wbc, bool pages_dirty) { bool found_error = false; int ret = 0; struct address_space *mapping = inode->i_mapping; struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); const u32 sectorsize = fs_info->sectorsize; loff_t i_size = i_size_read(inode); u64 cur = start; struct btrfs_bio_ctrl bio_ctrl = { .wbc = wbc, .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), }; if (wbc->no_cgroup_owner) bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT; ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); while (cur <= end) { u64 cur_end; u32 cur_len; struct folio *folio; folio = filemap_get_folio(mapping, cur >> PAGE_SHIFT); /* * This shouldn't happen, the pages are pinned and locked, this * code is just in case, but shouldn't actually be run. 
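* If it does happen, finish the ordered extent for the affected range and record the error on the mapping instead of crashing.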
*/ if (IS_ERR(folio)) { cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); cur_len = cur_end + 1 - cur; btrfs_mark_ordered_io_finished(BTRFS_I(inode), NULL, cur, cur_len, false); mapping_set_error(mapping, PTR_ERR(folio)); cur = cur_end; continue; } cur_end = min_t(u64, folio_end(folio) - 1, end); cur_len = cur_end + 1 - cur; ASSERT(folio_test_locked(folio)); if (pages_dirty && folio != locked_folio) ASSERT(folio_test_dirty(folio)); /* * Set the submission bitmap to submit all sectors. * extent_writepage_io() will do the truncation correctly. */ bio_ctrl.submit_bitmap = (unsigned long)-1; ret = extent_writepage_io(BTRFS_I(inode), folio, cur, cur_len, &bio_ctrl, i_size); if (ret == 1) goto next_page; if (ret) mapping_set_error(mapping, ret); btrfs_folio_end_lock(fs_info, folio, cur, cur_len); if (ret < 0) found_error = true; next_page: folio_put(folio); cur = cur_end + 1; } submit_write_bio(&bio_ctrl, found_error ? ret : 0); } int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; int ret = 0; struct btrfs_bio_ctrl bio_ctrl = { .wbc = wbc, .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), }; /* * Allow only a single thread to do the reloc work in zoned mode to * protect the write pointer updates. */ btrfs_zoned_data_reloc_lock(BTRFS_I(inode)); ret = extent_write_cache_pages(mapping, &bio_ctrl); submit_write_bio(&bio_ctrl, ret); btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); return ret; } void btrfs_readahead(struct readahead_control *rac) { struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD, .ractl = rac }; struct folio *folio; struct btrfs_inode *inode = BTRFS_I(rac->mapping->host); const u64 start = readahead_pos(rac); const u64 end = start + readahead_length(rac) - 1; struct extent_state *cached_state = NULL; struct extent_map *em_cached = NULL; u64 prev_em_start = (u64)-1; lock_extents_for_read(inode, start, end, &cached_state); while ((folio = readahead_folio(rac)) != NULL) btrfs_do_readpage(folio, &em_cached, &bio_ctrl, &prev_em_start); btrfs_unlock_extent(&inode->io_tree, start, end, &cached_state); if (em_cached) btrfs_free_extent_map(em_cached); submit_one_bio(&bio_ctrl); } /* * basic invalidate_folio code, this waits on any locked or writeback * ranges corresponding to the folio, and then deletes any extent state * records from the tree */ int extent_invalidate_folio(struct extent_io_tree *tree, struct folio *folio, size_t offset) { struct extent_state *cached_state = NULL; u64 start = folio_pos(folio); u64 end = start + folio_size(folio) - 1; size_t blocksize = folio_to_fs_info(folio)->sectorsize; /* This function is only called for the btree inode */ ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO); start += ALIGN(offset, blocksize); if (start > end) return 0; btrfs_lock_extent(tree, start, end, &cached_state); folio_wait_writeback(folio); /* * Currently for btree io tree, only EXTENT_LOCKED is utilized, * so here we only need to unlock the extent range to free any * existing extent state. */ btrfs_unlock_extent(tree, start, end, &cached_state); return 0; } /* * A helper for struct address_space_operations::release_folio, this tests for * areas of the folio that are locked or under IO and drops the related state * bits if it is safe to drop the folio. 
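* Returns true if the extent state bits were cleared and the folio can be released, false otherwise.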
*/ static bool try_release_extent_state(struct extent_io_tree *tree, struct folio *folio) { struct extent_state *cached_state = NULL; u64 start = folio_pos(folio); u64 end = start + folio_size(folio) - 1; u32 range_bits; u32 clear_bits; bool ret = false; int ret2; btrfs_get_range_bits(tree, start, end, &range_bits, &cached_state); /* * We can release the folio if it's locked only for ordered extent * completion, since that doesn't require using the folio. */ if ((range_bits & EXTENT_LOCKED) && !(range_bits & EXTENT_FINISHING_ORDERED)) goto out; clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM | EXTENT_DELALLOC_NEW | EXTENT_CTLBITS | EXTENT_QGROUP_RESERVED | EXTENT_FINISHING_ORDERED); /* * At this point we can safely clear everything except the locked, * nodatasum, delalloc new and finishing ordered bits. The delalloc new * bit will be cleared by ordered extent completion. */ ret2 = btrfs_clear_extent_bit(tree, start, end, clear_bits, &cached_state); /* * If clear_extent_bit failed for enomem reasons, we can't allow the * release to continue. */ if (ret2 == 0) ret = true; out: btrfs_free_extent_state(cached_state); return ret; } /* * a helper for release_folio. As long as there are no locked extents * in the range corresponding to the page, both state records and extent * map records are removed */ bool try_release_extent_mapping(struct folio *folio, gfp_t mask) { u64 start = folio_pos(folio); u64 end = start + folio_size(folio) - 1; struct btrfs_inode *inode = folio_to_inode(folio); struct extent_io_tree *io_tree = &inode->io_tree; while (start <= end) { const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info); const u64 len = end - start + 1; struct extent_map_tree *extent_tree = &inode->extent_tree; struct extent_map *em; write_lock(&extent_tree->lock); em = btrfs_lookup_extent_mapping(extent_tree, start, len); if (!em) { write_unlock(&extent_tree->lock); break; } if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) { write_unlock(&extent_tree->lock); btrfs_free_extent_map(em); break; } if (btrfs_test_range_bit_exists(io_tree, em->start, btrfs_extent_map_end(em) - 1, EXTENT_LOCKED)) goto next; /* * If it's not in the list of modified extents, used by a fast * fsync, we can remove it. If it's being logged we can safely * remove it since fsync took an extra reference on the em. */ if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING)) goto remove_em; /* * If it's in the list of modified extents, remove it only if * its generation is older then the current one, in which case * we don't need it for a fast fsync. Otherwise don't remove it, * we could be racing with an ongoing fast fsync that could miss * the new extent. */ if (em->generation >= cur_gen) goto next; remove_em: /* * We only remove extent maps that are not in the list of * modified extents or that are in the list but with a * generation lower then the current generation, so there is no * need to set the full fsync flag on the inode (it hurts the * fsync performance for workloads with a data size that exceeds * or is close to the system's memory). */ btrfs_remove_extent_mapping(inode, em); /* Once for the inode's extent map tree. */ btrfs_free_extent_map(em); next: start = btrfs_extent_map_end(em); write_unlock(&extent_tree->lock); /* Once for us, for the lookup_extent_mapping() reference. */ btrfs_free_extent_map(em); if (need_resched()) { /* * If we need to resched but we can't block just exit * and leave any remaining extent maps. 
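* Whether blocking is allowed is determined by the gfp mask passed to release_folio (see the gfpflags_allow_blocking() check below).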
*/ if (!gfpflags_allow_blocking(mask)) break; cond_resched(); } } return try_release_extent_state(io_tree, folio); } static int extent_buffer_under_io(const struct extent_buffer *eb) { return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); } static bool folio_range_has_eb(struct folio *folio) { struct btrfs_folio_state *bfs; lockdep_assert_held(&folio->mapping->i_private_lock); if (folio_test_private(folio)) { bfs = folio_get_private(folio); if (atomic_read(&bfs->eb_refs)) return true; } return false; } static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio) { struct btrfs_fs_info *fs_info = eb->fs_info; struct address_space *mapping = folio->mapping; const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); /* * For mapped eb, we're going to change the folio private, which should * be done under the i_private_lock. */ if (mapped) spin_lock(&mapping->i_private_lock); if (!folio_test_private(folio)) { if (mapped) spin_unlock(&mapping->i_private_lock); return; } if (!btrfs_meta_is_subpage(fs_info)) { /* * We do this since we'll remove the pages after we've removed * the eb from the xarray, so we could race and have this page * now attached to the new eb. So only clear folio if it's * still connected to this eb. */ if (folio_test_private(folio) && folio_get_private(folio) == eb) { BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); BUG_ON(folio_test_dirty(folio)); BUG_ON(folio_test_writeback(folio)); /* We need to make sure we haven't be attached to a new eb. */ folio_detach_private(folio); } if (mapped) spin_unlock(&mapping->i_private_lock); return; } /* * For subpage, we can have dummy eb with folio private attached. In * this case, we can directly detach the private as such folio is only * attached to one dummy eb, no sharing. */ if (!mapped) { btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); return; } btrfs_folio_dec_eb_refs(fs_info, folio); /* * We can only detach the folio private if there are no other ebs in the * page range and no unfinished IO. */ if (!folio_range_has_eb(folio)) btrfs_detach_folio_state(fs_info, folio, BTRFS_SUBPAGE_METADATA); spin_unlock(&mapping->i_private_lock); } /* Release all folios attached to the extent buffer */ static void btrfs_release_extent_buffer_folios(const struct extent_buffer *eb) { ASSERT(!extent_buffer_under_io(eb)); for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) { struct folio *folio = eb->folios[i]; if (!folio) continue; detach_extent_buffer_folio(eb, folio); } } /* * Helper for releasing the extent buffer. */ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) { btrfs_release_extent_buffer_folios(eb); btrfs_leak_debug_del_eb(eb); kmem_cache_free(extent_buffer_cache, eb); } static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb = NULL; eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); eb->start = start; eb->len = fs_info->nodesize; eb->fs_info = fs_info; init_rwsem(&eb->lock); btrfs_leak_debug_add_eb(eb); spin_lock_init(&eb->refs_lock); refcount_set(&eb->refs, 1); ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE); return eb; } /* * For use in eb allocation error cleanup paths, as btrfs_release_extent_buffer() * does not call folio_put(), and we need to set the folios to NULL so that * btrfs_release_extent_buffer() will not detach them a second time. 
*/ static void cleanup_extent_buffer_folios(struct extent_buffer *eb) { const int num_folios = num_extent_folios(eb); /* We canont use num_extent_folios() as loop bound as eb->folios changes. */ for (int i = 0; i < num_folios; i++) { ASSERT(eb->folios[i]); detach_extent_buffer_folio(eb, eb->folios[i]); folio_put(eb->folios[i]); eb->folios[i] = NULL; } } struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src) { struct extent_buffer *new; int num_folios; int ret; new = __alloc_extent_buffer(src->fs_info, src->start); if (new == NULL) return NULL; /* * Set UNMAPPED before calling btrfs_release_extent_buffer(), as * btrfs_release_extent_buffer() have different behavior for * UNMAPPED subpage extent buffer. */ set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags); ret = alloc_eb_folio_array(new, false); if (ret) goto release_eb; ASSERT(num_extent_folios(src) == num_extent_folios(new), "%d != %d", num_extent_folios(src), num_extent_folios(new)); /* Explicitly use the cached num_extent value from now on. */ num_folios = num_extent_folios(src); for (int i = 0; i < num_folios; i++) { struct folio *folio = new->folios[i]; ret = attach_extent_buffer_folio(new, folio, NULL); if (ret < 0) goto cleanup_folios; WARN_ON(folio_test_dirty(folio)); } for (int i = 0; i < num_folios; i++) folio_put(new->folios[i]); copy_extent_buffer_full(new, src); set_extent_buffer_uptodate(new); return new; cleanup_folios: cleanup_extent_buffer_folios(new); release_eb: btrfs_release_extent_buffer(new); return NULL; } struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb; int ret; eb = __alloc_extent_buffer(fs_info, start); if (!eb) return NULL; ret = alloc_eb_folio_array(eb, false); if (ret) goto release_eb; for (int i = 0; i < num_extent_folios(eb); i++) { ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); if (ret < 0) goto cleanup_folios; } for (int i = 0; i < num_extent_folios(eb); i++) folio_put(eb->folios[i]); set_extent_buffer_uptodate(eb); btrfs_set_header_nritems(eb, 0); set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); return eb; cleanup_folios: cleanup_extent_buffer_folios(eb); release_eb: btrfs_release_extent_buffer(eb); return NULL; } static void check_buffer_tree_ref(struct extent_buffer *eb) { int refs; /* * The TREE_REF bit is first set when the extent_buffer is added to the * xarray. It is also reset, if unset, when a new reference is created * by find_extent_buffer. * * It is only cleared in two cases: freeing the last non-tree * reference to the extent_buffer when its STALE bit is set or * calling release_folio when the tree reference is the only reference. * * In both cases, care is taken to ensure that the extent_buffer's * pages are not under io. However, release_folio can be concurrently * called with creating new references, which is prone to race * conditions between the calls to check_buffer_tree_ref in those * codepaths and clearing TREE_REF in try_release_extent_buffer. * * The actual lifetime of the extent_buffer in the xarray is adequately * protected by the refcount, but the TREE_REF bit and its corresponding * reference are not. To protect against this class of races, we call * check_buffer_tree_ref() from the code paths which trigger io. Note that * once io is initiated, TREE_REF can no longer be cleared, so that is * the moment at which any such race is best fixed. 
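* The refcount/TREE_REF check below is the lockless fast path; eb->refs_lock is only taken when the TREE_REF bit may actually need to be set.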
*/ refs = refcount_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) return; spin_lock(&eb->refs_lock); if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) refcount_inc(&eb->refs); spin_unlock(&eb->refs_lock); } static void mark_extent_buffer_accessed(struct extent_buffer *eb) { check_buffer_tree_ref(eb); for (int i = 0; i < num_extent_folios(eb); i++) folio_mark_accessed(eb->folios[i]); } struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { struct extent_buffer *eb; eb = find_extent_buffer_nolock(fs_info, start); if (!eb) return NULL; /* * Lock our eb's refs_lock to avoid races with free_extent_buffer(). * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and * another task running free_extent_buffer() might have seen that flag * set, eb->refs == 2, that the buffer isn't under IO (dirty and * writeback flags not set) and it's still in the tree (flag * EXTENT_BUFFER_TREE_REF set), therefore being in the process of * decrementing the extent buffer's reference count twice. So here we * could race and increment the eb's reference count, clear its stale * flag, mark it as dirty and drop our reference before the other task * finishes executing free_extent_buffer, which would later result in * an attempt to free an extent buffer that is dirty. */ if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { spin_lock(&eb->refs_lock); spin_unlock(&eb->refs_lock); } mark_extent_buffer_accessed(eb); return eb; } struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *eb, *exists = NULL; int ret; eb = find_extent_buffer(fs_info, start); if (eb) return eb; eb = alloc_dummy_extent_buffer(fs_info, start); if (!eb) return ERR_PTR(-ENOMEM); eb->fs_info = fs_info; again: xa_lock_irq(&fs_info->buffer_tree); exists = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits, NULL, eb, GFP_NOFS); if (xa_is_err(exists)) { ret = xa_err(exists); xa_unlock_irq(&fs_info->buffer_tree); btrfs_release_extent_buffer(eb); return ERR_PTR(ret); } if (exists) { if (!refcount_inc_not_zero(&exists->refs)) { /* The extent buffer is being freed, retry. */ xa_unlock_irq(&fs_info->buffer_tree); goto again; } xa_unlock_irq(&fs_info->buffer_tree); btrfs_release_extent_buffer(eb); return exists; } xa_unlock_irq(&fs_info->buffer_tree); check_buffer_tree_ref(eb); return eb; #else /* Stub to avoid linker error when compiled with optimizations turned off. */ return NULL; #endif } static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info, struct folio *folio) { struct extent_buffer *exists; lockdep_assert_held(&folio->mapping->i_private_lock); /* * For subpage case, we completely rely on xarray to ensure we don't try * to insert two ebs for the same bytenr. So here we always return NULL * and just continue. */ if (btrfs_meta_is_subpage(fs_info)) return NULL; /* Page not yet attached to an extent buffer */ if (!folio_test_private(folio)) return NULL; /* * We could have already allocated an eb for this folio and attached one * so lets see if we can get a ref on the existing eb, and if we can we * know it's good and we can just return that one, else we know we can * just overwrite folio private. */ exists = folio_get_private(folio); if (refcount_inc_not_zero(&exists->refs)) return exists; WARN_ON(folio_test_dirty(folio)); folio_detach_private(folio); return NULL; } /* * Validate alignment constraints of eb at logical address @start. 
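* Returns true if the eb is misaligned and must not be used, false if it is usable; a start that is page aligned but not nodesize aligned is still accepted, with a one-time warning suggesting a full metadata balance.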
*/ static bool check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start) { if (!IS_ALIGNED(start, fs_info->sectorsize)) { btrfs_err(fs_info, "bad tree block start %llu", start); return true; } if (fs_info->nodesize < PAGE_SIZE && !IS_ALIGNED(start, fs_info->nodesize)) { btrfs_err(fs_info, "tree block is not nodesize aligned, start %llu nodesize %u", start, fs_info->nodesize); return true; } if (fs_info->nodesize >= PAGE_SIZE && !PAGE_ALIGNED(start)) { btrfs_err(fs_info, "tree block is not page aligned, start %llu nodesize %u", start, fs_info->nodesize); return true; } if (!IS_ALIGNED(start, fs_info->nodesize) && !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) { btrfs_warn(fs_info, "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance", start, fs_info->nodesize); } return false; } /* * Return 0 if eb->folios[i] is attached to btree inode successfully. * Return >0 if there is already another extent buffer for the range, * and @found_eb_ret would be updated. * Return -EAGAIN if the filemap has an existing folio but with different size * than @eb. * The caller needs to free the existing folios and retry using the same order. */ static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, struct btrfs_folio_state *prealloc, struct extent_buffer **found_eb_ret) { struct btrfs_fs_info *fs_info = eb->fs_info; struct address_space *mapping = fs_info->btree_inode->i_mapping; const pgoff_t index = eb->start >> PAGE_SHIFT; struct folio *existing_folio; int ret; ASSERT(found_eb_ret); /* Caller should ensure the folio exists. */ ASSERT(eb->folios[i]); retry: existing_folio = NULL; ret = filemap_add_folio(mapping, eb->folios[i], index + i, GFP_NOFS | __GFP_NOFAIL); if (!ret) goto finish; existing_folio = filemap_lock_folio(mapping, index + i); /* The page cache only exists for a very short time, just retry. */ if (IS_ERR(existing_folio)) goto retry; /* For now, we should only have single-page folios for btree inode. */ ASSERT(folio_nr_pages(existing_folio) == 1); if (folio_size(existing_folio) != eb->folio_size) { folio_unlock(existing_folio); folio_put(existing_folio); return -EAGAIN; } finish: spin_lock(&mapping->i_private_lock); if (existing_folio && btrfs_meta_is_subpage(fs_info)) { /* We're going to reuse the existing page, can drop our folio now. */ __free_page(folio_page(eb->folios[i], 0)); eb->folios[i] = existing_folio; } else if (existing_folio) { struct extent_buffer *existing_eb; existing_eb = grab_extent_buffer(fs_info, existing_folio); if (existing_eb) { /* The extent buffer still exists, we can use it directly. */ *found_eb_ret = existing_eb; spin_unlock(&mapping->i_private_lock); folio_unlock(existing_folio); folio_put(existing_folio); return 1; } /* The extent buffer no longer exists, we can reuse the folio. */ __free_page(folio_page(eb->folios[i], 0)); eb->folios[i] = existing_folio; } eb->folio_size = folio_size(eb->folios[i]); eb->folio_shift = folio_shift(eb->folios[i]); /* Should not fail, as we have preallocated the memory. */ ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); ASSERT(!ret); /* * To inform we have an extra eb under allocation, so that * detach_extent_buffer_page() won't release the folio private when the * eb hasn't been inserted into the xarray yet. * * The ref will be decreased when the eb releases the page, in * detach_extent_buffer_page(). Thus needs no special handling in the * error path. 
*/ btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]); spin_unlock(&mapping->i_private_lock); return 0; } struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, u64 owner_root, int level) { int attached = 0; struct extent_buffer *eb; struct extent_buffer *existing_eb = NULL; struct btrfs_folio_state *prealloc = NULL; u64 lockdep_owner = owner_root; bool page_contig = true; int uptodate = 1; int ret; if (check_eb_alignment(fs_info, start)) return ERR_PTR(-EINVAL); #if BITS_PER_LONG == 32 if (start >= MAX_LFS_FILESIZE) { btrfs_err_rl(fs_info, "extent buffer %llu is beyond 32bit page cache limit", start); btrfs_err_32bit_limit(fs_info); return ERR_PTR(-EOVERFLOW); } if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD) btrfs_warn_32bit_limit(fs_info); #endif eb = find_extent_buffer(fs_info, start); if (eb) return eb; eb = __alloc_extent_buffer(fs_info, start); if (!eb) return ERR_PTR(-ENOMEM); /* * The reloc trees are just snapshots, so we need them to appear to be * just like any other fs tree WRT lockdep. */ if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID) lockdep_owner = BTRFS_FS_TREE_OBJECTID; btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); /* * Preallocate folio private for subpage case, so that we won't * allocate memory with i_private_lock nor page lock hold. * * The memory will be freed by attach_extent_buffer_page() or freed * manually if we exit earlier. */ if (btrfs_meta_is_subpage(fs_info)) { prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA); if (IS_ERR(prealloc)) { ret = PTR_ERR(prealloc); goto out; } } reallocate: /* Allocate all pages first. */ ret = alloc_eb_folio_array(eb, true); if (ret < 0) { btrfs_free_folio_state(prealloc); goto out; } /* Attach all pages to the filemap. */ for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio; ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb); if (ret > 0) { ASSERT(existing_eb); goto out; } /* * TODO: Special handling for a corner case where the order of * folios mismatch between the new eb and filemap. * * This happens when: * * - the new eb is using higher order folio * * - the filemap is still using 0-order folios for the range * This can happen at the previous eb allocation, and we don't * have higher order folio for the call. * * - the existing eb has already been freed * * In this case, we have to free the existing folios first, and * re-allocate using the same order. * Thankfully this is not going to happen yet, as we're still * using 0-order folios. */ if (unlikely(ret == -EAGAIN)) { DEBUG_WARN("folio order mismatch between new eb and filemap"); goto reallocate; } attached++; /* * Only after attach_eb_folio_to_filemap(), eb->folios[] is * reliable, as we may choose to reuse the existing page cache * and free the allocated page. */ folio = eb->folios[i]; WARN_ON(btrfs_meta_folio_test_dirty(folio, eb)); /* * Check if the current page is physically contiguous with previous eb * page. * At this stage, either we allocated a large folio, thus @i * would only be 0, or we fall back to per-page allocation. */ if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0)) page_contig = false; if (!btrfs_meta_folio_test_uptodate(folio, eb)) uptodate = 0; /* * We can't unlock the pages just yet since the extent buffer * hasn't been properly inserted into the xarray, this opens a * race with btree_release_folio() which can free a page while we * are still filling in all pages for the buffer and we could crash. 
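* The folios stay locked until the eb has been inserted into the buffer_tree xarray and holds its tree reference; they are unlocked (and their allocation reference dropped) further below.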
*/ } if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); /* All pages are physically contiguous, can skip cross page handling. */ if (page_contig) eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start); again: xa_lock_irq(&fs_info->buffer_tree); existing_eb = __xa_cmpxchg(&fs_info->buffer_tree, start >> fs_info->nodesize_bits, NULL, eb, GFP_NOFS); if (xa_is_err(existing_eb)) { ret = xa_err(existing_eb); xa_unlock_irq(&fs_info->buffer_tree); goto out; } if (existing_eb) { if (!refcount_inc_not_zero(&existing_eb->refs)) { xa_unlock_irq(&fs_info->buffer_tree); goto again; } xa_unlock_irq(&fs_info->buffer_tree); goto out; } xa_unlock_irq(&fs_info->buffer_tree); /* add one reference for the tree */ check_buffer_tree_ref(eb); /* * Now it's safe to unlock the pages because any calls to * btree_release_folio will correctly detect that a page belongs to a * live buffer and won't free them prematurely. */ for (int i = 0; i < num_extent_folios(eb); i++) { folio_unlock(eb->folios[i]); /* * A folio that has been added to an address_space mapping * should not continue holding the refcount from its original * allocation indefinitely. */ folio_put(eb->folios[i]); } return eb; out: WARN_ON(!refcount_dec_and_test(&eb->refs)); /* * Any attached folios need to be detached before we unlock them. This * is because when we're inserting our new folios into the mapping, and * then attaching our eb to that folio. If we fail to insert our folio * we'll lookup the folio for that index, and grab that EB. We do not * want that to grab this eb, as we're getting ready to free it. So we * have to detach it first and then unlock it. * * Note: the bounds is num_extent_pages() as we need to go through all slots. */ for (int i = 0; i < num_extent_pages(eb); i++) { struct folio *folio = eb->folios[i]; if (i < attached) { ASSERT(folio); detach_extent_buffer_folio(eb, folio); folio_unlock(folio); } else if (!folio) { continue; } folio_put(folio); eb->folios[i] = NULL; } btrfs_release_extent_buffer(eb); if (ret < 0) return ERR_PTR(ret); ASSERT(existing_eb); return existing_eb; } static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head) { struct extent_buffer *eb = container_of(head, struct extent_buffer, rcu_head); kmem_cache_free(extent_buffer_cache, eb); } static int release_extent_buffer(struct extent_buffer *eb) __releases(&eb->refs_lock) { lockdep_assert_held(&eb->refs_lock); if (refcount_dec_and_test(&eb->refs)) { struct btrfs_fs_info *fs_info = eb->fs_info; spin_unlock(&eb->refs_lock); /* * We're erasing, theoretically there will be no allocations, so * just use GFP_ATOMIC. * * We use cmpxchg instead of erase because we do not know if * this eb is actually in the tree or not, we could be cleaning * up an eb that we allocated but never inserted into the tree. * Thus use cmpxchg to remove it from the tree if it is there, * or leave the other entry if this isn't in the tree. * * The documentation says that putting a NULL value is the same * as erase as long as XA_FLAGS_ALLOC is not set, which it isn't * in this case. */ xa_cmpxchg_irq(&fs_info->buffer_tree, eb->start >> fs_info->nodesize_bits, eb, NULL, GFP_ATOMIC); btrfs_leak_debug_del_eb(eb); /* Should be safe to release folios at this point. 
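* The eb was already removed from the buffer_tree above, so no new reference can be taken on it through a tree lookup.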
*/ btrfs_release_extent_buffer_folios(eb); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { kmem_cache_free(extent_buffer_cache, eb); return 1; } #endif call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); return 1; } spin_unlock(&eb->refs_lock); return 0; } void free_extent_buffer(struct extent_buffer *eb) { int refs; if (!eb) return; refs = refcount_read(&eb->refs); while (1) { if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) { if (refs == 1) break; } else if (refs <= 3) { break; } /* Optimization to avoid locking eb->refs_lock. */ if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1)) return; } spin_lock(&eb->refs_lock); if (refcount_read(&eb->refs) == 2 && test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && !extent_buffer_under_io(eb) && test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) refcount_dec(&eb->refs); /* * I know this is terrible, but it's temporary until we stop tracking * the uptodate bits and such for the extent buffers. */ release_extent_buffer(eb); } void free_extent_buffer_stale(struct extent_buffer *eb) { if (!eb) return; spin_lock(&eb->refs_lock); set_bit(EXTENT_BUFFER_STALE, &eb->bflags); if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) refcount_dec(&eb->refs); release_extent_buffer(eb); } static void btree_clear_folio_dirty_tag(struct folio *folio) { ASSERT(!folio_test_dirty(folio)); ASSERT(folio_test_locked(folio)); xa_lock_irq(&folio->mapping->i_pages); if (!folio_test_dirty(folio)) __xa_clear_mark(&folio->mapping->i_pages, folio->index, PAGECACHE_TAG_DIRTY); xa_unlock_irq(&folio->mapping->i_pages); } void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans, struct extent_buffer *eb) { struct btrfs_fs_info *fs_info = eb->fs_info; btrfs_assert_tree_write_locked(eb); if (trans && btrfs_header_generation(eb) != trans->transid) return; /* * Instead of clearing the dirty flag off of the buffer, mark it as * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve * write-ordering in zoned mode, without the need to later re-dirty * the extent_buffer. * * The actual zeroout of the buffer will happen later in * btree_csum_one_bio. */ if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags); return; } if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) return; buffer_tree_clear_mark(eb, PAGECACHE_TAG_DIRTY); percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, fs_info->dirty_metadata_batch); for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; bool last; if (!folio_test_dirty(folio)) continue; folio_lock(folio); last = btrfs_meta_folio_clear_and_test_dirty(folio, eb); if (last) btree_clear_folio_dirty_tag(folio); folio_unlock(folio); } WARN_ON(refcount_read(&eb->refs) == 0); } void set_extent_buffer_dirty(struct extent_buffer *eb) { bool was_dirty; check_buffer_tree_ref(eb); was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); WARN_ON(refcount_read(&eb->refs) == 0); WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)); if (!was_dirty) { bool subpage = btrfs_meta_is_subpage(eb->fs_info); /* * For subpage case, we can have other extent buffers in the * same page, and in clear_extent_buffer_dirty() we * have to clear page dirty without subpage lock held. * This can cause race where our page gets dirty cleared after * we just set it. 
* * Thankfully, clear_extent_buffer_dirty() has locked * its page for other reasons, we can use page lock to prevent * the above race. */ if (subpage) folio_lock(eb->folios[0]); for (int i = 0; i < num_extent_folios(eb); i++) btrfs_meta_folio_set_dirty(eb->folios[i], eb); buffer_tree_set_mark(eb, PAGECACHE_TAG_DIRTY); if (subpage) folio_unlock(eb->folios[0]); percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, eb->len, eb->fs_info->dirty_metadata_batch); } #ifdef CONFIG_BTRFS_DEBUG for (int i = 0; i < num_extent_folios(eb); i++) ASSERT(folio_test_dirty(eb->folios[i])); #endif } void clear_extent_buffer_uptodate(struct extent_buffer *eb) { clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; if (!folio) continue; btrfs_meta_folio_clear_uptodate(folio, eb); } } void set_extent_buffer_uptodate(struct extent_buffer *eb) { set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); for (int i = 0; i < num_extent_folios(eb); i++) btrfs_meta_folio_set_uptodate(eb->folios[i], eb); } static void clear_extent_buffer_reading(struct extent_buffer *eb) { clear_and_wake_up_bit(EXTENT_BUFFER_READING, &eb->bflags); } static void end_bbio_meta_read(struct btrfs_bio *bbio) { struct extent_buffer *eb = bbio->private; bool uptodate = !bbio->bio.bi_status; /* * If the extent buffer is marked UPTODATE before the read operation * completes, other calls to read_extent_buffer_pages() will return * early without waiting for the read to finish, causing data races. */ WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)); eb->read_mirror = bbio->mirror_num; if (uptodate && btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) uptodate = false; if (uptodate) set_extent_buffer_uptodate(eb); else clear_extent_buffer_uptodate(eb); clear_extent_buffer_reading(eb); free_extent_buffer(eb); bio_put(&bbio->bio); } int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num, const struct btrfs_tree_parent_check *check) { struct btrfs_bio *bbio; if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) return 0; /* * We could have had EXTENT_BUFFER_UPTODATE cleared by the write * operation, which could potentially still be in flight. In this case * we simply want to return an error. */ if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) return -EIO; /* Someone else is already reading the buffer, just wait for it. */ if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) return 0; /* * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have * started and finished reading the same eb. In this case, UPTODATE * will now be set, and we shouldn't read it in again. 
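* In that case just clear the READING bit we set (waking up any waiters) and return success without issuing another read.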
*/ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { clear_extent_buffer_reading(eb); return 0; } eb->read_mirror = 0; check_buffer_tree_ref(eb); refcount_inc(&eb->refs); bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_READ | REQ_META, eb->fs_info, end_bbio_meta_read, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; bbio->inode = BTRFS_I(eb->fs_info->btree_inode); bbio->file_offset = eb->start; memcpy(&bbio->parent_check, check, sizeof(*check)); for (int i = 0; i < num_extent_folios(eb); i++) { struct folio *folio = eb->folios[i]; u64 range_start = max_t(u64, eb->start, folio_pos(folio)); u32 range_len = min_t(u64, folio_end(folio), eb->start + eb->len) - range_start; bio_add_folio_nofail(&bbio->bio, folio, range_len, offset_in_folio(folio, range_start)); } btrfs_submit_bbio(bbio, mirror_num); return 0; } int read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, const struct btrfs_tree_parent_check *check) { int ret; ret = read_extent_buffer_pages_nowait(eb, mirror_num, check); if (ret < 0) return ret; wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) return -EIO; return 0; } static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, unsigned long len) { btrfs_warn(eb->fs_info, "access to eb bytenr %llu len %u out of range start %lu len %lu", eb->start, eb->len, start, len); DEBUG_WARN(); return true; } /* * Check if the [start, start + len) range is valid before reading/writing * the eb. * NOTE: @start and @len are offset inside the eb, not logical address. * * Caller should not touch the dst/src memory if this function returns error. */ static inline int check_eb_range(const struct extent_buffer *eb, unsigned long start, unsigned long len) { unsigned long offset; /* start, start + len should not go beyond eb->len nor overflow */ if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) return report_eb_range(eb, start, len); return false; } void read_extent_buffer(const struct extent_buffer *eb, void *dstv, unsigned long start, unsigned long len) { const int unit_size = eb->folio_size; size_t cur; size_t offset; char *dst = (char *)dstv; unsigned long i = get_eb_folio_index(eb, start); if (check_eb_range(eb, start, len)) { /* * Invalid range hit, reset the memory, so callers won't get * some random garbage for their uninitialized memory. 
*/ memset(dstv, 0, len); return; } if (eb->addr) { memcpy(dstv, eb->addr + start, len); return; } offset = get_eb_offset_in_folio(eb, start); while (len > 0) { char *kaddr; cur = min(len, unit_size - offset); kaddr = folio_address(eb->folios[i]); memcpy(dst, kaddr + offset, cur); dst += cur; len -= cur; offset = 0; i++; } } int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, void __user *dstv, unsigned long start, unsigned long len) { const int unit_size = eb->folio_size; size_t cur; size_t offset; char __user *dst = (char __user *)dstv; unsigned long i = get_eb_folio_index(eb, start); int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); if (eb->addr) { if (copy_to_user_nofault(dstv, eb->addr + start, len)) ret = -EFAULT; return ret; } offset = get_eb_offset_in_folio(eb, start); while (len > 0) { char *kaddr; cur = min(len, unit_size - offset); kaddr = folio_address(eb->folios[i]); if (copy_to_user_nofault(dst, kaddr + offset, cur)) { ret = -EFAULT; break; } dst += cur; len -= cur; offset = 0; i++; } return ret; } int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, unsigned long start, unsigned long len) { const int unit_size = eb->folio_size; size_t cur; size_t offset; char *kaddr; char *ptr = (char *)ptrv; unsigned long i = get_eb_folio_index(eb, start); int ret = 0; if (check_eb_range(eb, start, len)) return -EINVAL; if (eb->addr) return memcmp(ptrv, eb->addr + start, len); offset = get_eb_offset_in_folio(eb, start); while (len > 0) { cur = min(len, unit_size - offset); kaddr = folio_address(eb->folios[i]); ret = memcmp(ptr, kaddr + offset, cur); if (ret) break; ptr += cur; len -= cur; offset = 0; i++; } return ret; } /* * Check that the extent buffer is uptodate. * * For regular sector size == PAGE_SIZE case, check if @page is uptodate. * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE. */ static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i) { struct btrfs_fs_info *fs_info = eb->fs_info; struct folio *folio = eb->folios[i]; ASSERT(folio); /* * If we are using the commit root we could potentially clear a page * Uptodate while we're using the extent buffer that we've previously * looked up. We don't want to complain in this case, as the page was * valid before, we just didn't write it out. Instead we want to catch * the case where we didn't actually read the block properly, which * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR. */ if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) return; if (btrfs_meta_is_subpage(fs_info)) { folio = eb->folios[0]; ASSERT(i == 0); if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio, eb->start, eb->len))) btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len); } else { WARN_ON(!folio_test_uptodate(folio)); } } static void __write_extent_buffer(const struct extent_buffer *eb, const void *srcv, unsigned long start, unsigned long len, bool use_memmove) { const int unit_size = eb->folio_size; size_t cur; size_t offset; char *kaddr; const char *src = (const char *)srcv; unsigned long i = get_eb_folio_index(eb, start); /* For unmapped (dummy) ebs, no need to check their uptodate status. 
*/ const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); if (check_eb_range(eb, start, len)) return; if (eb->addr) { if (use_memmove) memmove(eb->addr + start, srcv, len); else memcpy(eb->addr + start, srcv, len); return; } offset = get_eb_offset_in_folio(eb, start); while (len > 0) { if (check_uptodate) assert_eb_folio_uptodate(eb, i); cur = min(len, unit_size - offset); kaddr = folio_address(eb->folios[i]); if (use_memmove) memmove(kaddr + offset, src, cur); else memcpy(kaddr + offset, src, cur); src += cur; len -= cur; offset = 0; i++; } } void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, unsigned long start, unsigned long len) { return __write_extent_buffer(eb, srcv, start, len, false); } static void memset_extent_buffer(const struct extent_buffer *eb, int c, unsigned long start, unsigned long len) { const int unit_size = eb->folio_size; unsigned long cur = start; if (eb->addr) { memset(eb->addr + start, c, len); return; } while (cur < start + len) { unsigned long index = get_eb_folio_index(eb, cur); unsigned int offset = get_eb_offset_in_folio(eb, cur); unsigned int cur_len = min(start + len - cur, unit_size - offset); assert_eb_folio_uptodate(eb, index); memset(folio_address(eb->folios[index]) + offset, c, cur_len); cur += cur_len; } } void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, unsigned long len) { if (check_eb_range(eb, start, len)) return; return memset_extent_buffer(eb, 0, start, len); } void copy_extent_buffer_full(const struct extent_buffer *dst, const struct extent_buffer *src) { const int unit_size = src->folio_size; unsigned long cur = 0; ASSERT(dst->len == src->len); while (cur < src->len) { unsigned long index = get_eb_folio_index(src, cur); unsigned long offset = get_eb_offset_in_folio(src, cur); unsigned long cur_len = min(src->len, unit_size - offset); void *addr = folio_address(src->folios[index]) + offset; write_extent_buffer(dst, addr, cur, cur_len); cur += cur_len; } } void copy_extent_buffer(const struct extent_buffer *dst, const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, unsigned long len) { const int unit_size = dst->folio_size; u64 dst_len = dst->len; size_t cur; size_t offset; char *kaddr; unsigned long i = get_eb_folio_index(dst, dst_offset); if (check_eb_range(dst, dst_offset, len) || check_eb_range(src, src_offset, len)) return; WARN_ON(src->len != dst_len); offset = get_eb_offset_in_folio(dst, dst_offset); while (len > 0) { assert_eb_folio_uptodate(dst, i); cur = min(len, (unsigned long)(unit_size - offset)); kaddr = folio_address(dst->folios[i]); read_extent_buffer(src, kaddr + offset, src_offset, cur); src_offset += cur; len -= cur; offset = 0; i++; } } /* * Calculate the folio and offset of the byte containing the given bit number. * * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @nr: bit number * @folio_index: return index of the folio in the extent buffer that contains * the given bit number * @folio_offset: return offset into the folio given by folio_index * * This helper hides the ugliness of finding the byte in an extent buffer which * contains a given bit. 
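* For example, when the eb start is folio aligned (offset_in_eb_folio(eb, eb->start) == 0), bit @nr of the bitmap item at @start lives in byte (@start + @nr / 8) of the buffer, which is then split into a folio index and an offset within that folio.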
*/ static inline void eb_bitmap_offset(const struct extent_buffer *eb, unsigned long start, unsigned long nr, unsigned long *folio_index, size_t *folio_offset) { size_t byte_offset = BIT_BYTE(nr); size_t offset; /* * The byte we want is the offset of the extent buffer + the offset of * the bitmap item in the extent buffer + the offset of the byte in the * bitmap item. */ offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset; *folio_index = offset >> eb->folio_shift; *folio_offset = offset_in_eb_folio(eb, offset); } /* * Determine whether a bit in a bitmap item is set. * * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @nr: bit number to test */ bool extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, unsigned long nr) { unsigned long i; size_t offset; u8 *kaddr; eb_bitmap_offset(eb, start, nr, &i, &offset); assert_eb_folio_uptodate(eb, i); kaddr = folio_address(eb->folios[i]); return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); } static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) { unsigned long index = get_eb_folio_index(eb, bytenr); if (check_eb_range(eb, bytenr, 1)) return NULL; return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr); } /* * Set an area of a bitmap to 1. * * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @pos: bit number of the first bit * @len: number of bits to set */ void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len) { unsigned int first_byte = start + BIT_BYTE(pos); unsigned int last_byte = start + BIT_BYTE(pos + len - 1); const bool same_byte = (first_byte == last_byte); u8 mask = BITMAP_FIRST_BYTE_MASK(pos); u8 *kaddr; if (same_byte) mask &= BITMAP_LAST_BYTE_MASK(pos + len); /* Handle the first byte. */ kaddr = extent_buffer_get_byte(eb, first_byte); *kaddr |= mask; if (same_byte) return; /* Handle the byte aligned part. */ ASSERT(first_byte + 1 <= last_byte); memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); /* Handle the last byte. */ kaddr = extent_buffer_get_byte(eb, last_byte); *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len); } /* * Clear an area of a bitmap. * * @eb: the extent buffer * @start: offset of the bitmap item in the extent buffer * @pos: bit number of the first bit * @len: number of bits to clear */ void extent_buffer_bitmap_clear(const struct extent_buffer *eb, unsigned long start, unsigned long pos, unsigned long len) { unsigned int first_byte = start + BIT_BYTE(pos); unsigned int last_byte = start + BIT_BYTE(pos + len - 1); const bool same_byte = (first_byte == last_byte); u8 mask = BITMAP_FIRST_BYTE_MASK(pos); u8 *kaddr; if (same_byte) mask &= BITMAP_LAST_BYTE_MASK(pos + len); /* Handle the first byte. */ kaddr = extent_buffer_get_byte(eb, first_byte); *kaddr &= ~mask; if (same_byte) return; /* Handle the byte aligned part. */ ASSERT(first_byte + 1 <= last_byte); memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); /* Handle the last byte. */ kaddr = extent_buffer_get_byte(eb, last_byte); *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len); } static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) { unsigned long distance = (src > dst) ? 
src - dst : dst - src; return distance < len; } void memcpy_extent_buffer(const struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, unsigned long len) { const int unit_size = dst->folio_size; unsigned long cur_off = 0; if (check_eb_range(dst, dst_offset, len) || check_eb_range(dst, src_offset, len)) return; if (dst->addr) { const bool use_memmove = areas_overlap(src_offset, dst_offset, len); if (use_memmove) memmove(dst->addr + dst_offset, dst->addr + src_offset, len); else memcpy(dst->addr + dst_offset, dst->addr + src_offset, len); return; } while (cur_off < len) { unsigned long cur_src = cur_off + src_offset; unsigned long folio_index = get_eb_folio_index(dst, cur_src); unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src); unsigned long cur_len = min(src_offset + len - cur_src, unit_size - folio_off); void *src_addr = folio_address(dst->folios[folio_index]) + folio_off; const bool use_memmove = areas_overlap(src_offset + cur_off, dst_offset + cur_off, cur_len); __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len, use_memmove); cur_off += cur_len; } } void memmove_extent_buffer(const struct extent_buffer *dst, unsigned long dst_offset, unsigned long src_offset, unsigned long len) { unsigned long dst_end = dst_offset + len - 1; unsigned long src_end = src_offset + len - 1; if (check_eb_range(dst, dst_offset, len) || check_eb_range(dst, src_offset, len)) return; if (dst_offset < src_offset) { memcpy_extent_buffer(dst, dst_offset, src_offset, len); return; } if (dst->addr) { memmove(dst->addr + dst_offset, dst->addr + src_offset, len); return; } while (len > 0) { unsigned long src_i; size_t cur; size_t dst_off_in_folio; size_t src_off_in_folio; void *src_addr; bool use_memmove; src_i = get_eb_folio_index(dst, src_end); dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end); src_off_in_folio = get_eb_offset_in_folio(dst, src_end); cur = min_t(unsigned long, len, src_off_in_folio + 1); cur = min(cur, dst_off_in_folio + 1); src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio - cur + 1; use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, cur); __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, use_memmove); dst_end -= cur; src_end -= cur; len -= cur; } } static int try_release_subpage_extent_buffer(struct folio *folio) { struct btrfs_fs_info *fs_info = folio_to_fs_info(folio); struct extent_buffer *eb; unsigned long start = (folio_pos(folio) >> fs_info->nodesize_bits); unsigned long index = start; unsigned long end = index + (PAGE_SIZE >> fs_info->nodesize_bits) - 1; int ret; rcu_read_lock(); xa_for_each_range(&fs_info->buffer_tree, index, eb, start, end) { /* * The same as try_release_extent_buffer(), to ensure the eb * won't disappear out from under us. */ spin_lock(&eb->refs_lock); rcu_read_unlock(); if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); rcu_read_lock(); continue; } /* * If tree ref isn't set then we know the ref on this eb is a * real ref, so just return, this eb will likely be freed soon * anyway. */ if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { spin_unlock(&eb->refs_lock); break; } /* * Here we don't care about the return value, we will always * check the folio private at the end. And * release_extent_buffer() will release the refs_lock. 
*/ release_extent_buffer(eb); rcu_read_lock(); } rcu_read_unlock(); /* * Finally to check if we have cleared folio private, as if we have * released all ebs in the page, the folio private should be cleared now. */ spin_lock(&folio->mapping->i_private_lock); if (!folio_test_private(folio)) ret = 1; else ret = 0; spin_unlock(&folio->mapping->i_private_lock); return ret; } int try_release_extent_buffer(struct folio *folio) { struct extent_buffer *eb; if (btrfs_meta_is_subpage(folio_to_fs_info(folio))) return try_release_subpage_extent_buffer(folio); /* * We need to make sure nobody is changing folio private, as we rely on * folio private as the pointer to extent buffer. */ spin_lock(&folio->mapping->i_private_lock); if (!folio_test_private(folio)) { spin_unlock(&folio->mapping->i_private_lock); return 1; } eb = folio_get_private(folio); BUG_ON(!eb); /* * This is a little awful but should be ok, we need to make sure that * the eb doesn't disappear out from under us while we're looking at * this page. */ spin_lock(&eb->refs_lock); if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { spin_unlock(&eb->refs_lock); spin_unlock(&folio->mapping->i_private_lock); return 0; } spin_unlock(&folio->mapping->i_private_lock); /* * If tree ref isn't set then we know the ref on this eb is a real ref, * so just return, this page will likely be freed soon anyway. */ if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { spin_unlock(&eb->refs_lock); return 0; } return release_extent_buffer(eb); } /* * Attempt to readahead a child block. * * @fs_info: the fs_info * @bytenr: bytenr to read * @owner_root: objectid of the root that owns this eb * @gen: generation for the uptodate check, can be 0 * @level: level for the eb * * Attempt to readahead a tree block at @bytenr. If @gen is 0 then we do a * normal uptodate check of the eb, without checking the generation. If we have * to read the block we will not block on anything. */ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, u64 owner_root, u64 gen, int level) { struct btrfs_tree_parent_check check = { .level = level, .transid = gen }; struct extent_buffer *eb; int ret; eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); if (IS_ERR(eb)) return; if (btrfs_buffer_uptodate(eb, gen, 1)) { free_extent_buffer(eb); return; } ret = read_extent_buffer_pages_nowait(eb, 0, &check); if (ret < 0) free_extent_buffer_stale(eb); else free_extent_buffer(eb); } /* * Readahead a node's child block. * * @node: parent node we're reading from * @slot: slot in the parent node for the child we want to read * * A helper for btrfs_readahead_tree_block, we simply read the bytenr pointed at * the slot in the node provided. */ void btrfs_readahead_node_child(struct extent_buffer *node, int slot) { btrfs_readahead_tree_block(node->fs_info, btrfs_node_blockptr(node, slot), btrfs_header_owner(node), btrfs_node_ptr_generation(node, slot), btrfs_header_level(node) - 1); } |
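/*
 * Illustrative user-space sketch, not part of the extent buffer code above:
 * the same first-byte / aligned-middle / last-byte split that
 * extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() use, applied to
 * a plain byte buffer. The mask helpers are simplified equivalents of the
 * little-endian BIT_BYTE()/BITMAP_FIRST_BYTE_MASK()/BITMAP_LAST_BYTE_MASK()
 * convention (bit 0 is the least significant bit of byte 0).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define BITS_PER_BYTE		8
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)
#define FIRST_BYTE_MASK(pos)	(0xffU << ((pos) & (BITS_PER_BYTE - 1)))
#define LAST_BYTE_MASK(end)	(0xffU >> (-(end) & (BITS_PER_BYTE - 1)))

static void bitmap_set_range(uint8_t *map, unsigned long pos, unsigned long len)
{
	unsigned long first_byte = BIT_BYTE(pos);
	unsigned long last_byte = BIT_BYTE(pos + len - 1);
	uint8_t mask = FIRST_BYTE_MASK(pos);

	if (first_byte == last_byte) {
		/* Range fits in one byte: intersect both masks. */
		map[first_byte] |= mask & LAST_BYTE_MASK(pos + len);
		return;
	}

	map[first_byte] |= mask;			/* partial first byte */
	memset(map + first_byte + 1, 0xff,		/* whole middle bytes */
	       last_byte - first_byte - 1);
	map[last_byte] |= LAST_BYTE_MASK(pos + len);	/* partial last byte */
}

int main(void)
{
	uint8_t map[4] = { 0 };

	bitmap_set_range(map, 5, 13);	/* set bits 5..17 */
	printf("%02x %02x %02x %02x\n", map[0], map[1], map[2], map[3]);
	/* expected output: e0 ff 03 00 */
	return 0;
}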
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2011 Intel Corporation. All rights reserved.
*/ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include "nfc.h" #include "llcp.h" static const u8 llcp_tlv_length[LLCP_TLV_MAX] = { 0, 1, /* VERSION */ 2, /* MIUX */ 2, /* WKS */ 1, /* LTO */ 1, /* RW */ 0, /* SN */ 1, /* OPT */ 0, /* SDREQ */ 2, /* SDRES */ }; static u8 llcp_tlv8(const u8 *tlv, u8 type) { if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) return 0; return tlv[2]; } static u16 llcp_tlv16(const u8 *tlv, u8 type) { if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]]) return 0; return be16_to_cpu(*((__be16 *)(tlv + 2))); } static u8 llcp_tlv_version(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_VERSION); } static u16 llcp_tlv_miux(const u8 *tlv) { return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff; } static u16 llcp_tlv_wks(const u8 *tlv) { return llcp_tlv16(tlv, LLCP_TLV_WKS); } static u16 llcp_tlv_lto(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_LTO); } static u8 llcp_tlv_opt(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_OPT); } static u8 llcp_tlv_rw(const u8 *tlv) { return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf; } u8 *nfc_llcp_build_tlv(u8 type, const u8 *value, u8 value_length, u8 *tlv_length) { u8 *tlv, length; pr_debug("type %d\n", type); if (type >= LLCP_TLV_MAX) return NULL; length = llcp_tlv_length[type]; if (length == 0 && value_length == 0) return NULL; else if (length == 0) length = value_length; *tlv_length = 2 + length; tlv = kzalloc(2 + length, GFP_KERNEL); if (tlv == NULL) return tlv; tlv[0] = type; tlv[1] = length; memcpy(tlv + 2, value, length); return tlv; } struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap) { struct nfc_llcp_sdp_tlv *sdres; u8 value[2]; sdres = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); if (sdres == NULL) return NULL; value[0] = tid; value[1] = sap; sdres->tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, value, 2, &sdres->tlv_len); if (sdres->tlv == NULL) { kfree(sdres); return NULL; } sdres->tid = tid; sdres->sap = sap; INIT_HLIST_NODE(&sdres->node); return sdres; } struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, const char *uri, size_t uri_len) { struct nfc_llcp_sdp_tlv *sdreq; pr_debug("uri: %s, len: %zu\n", uri, uri_len); /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */ if (WARN_ON_ONCE(uri_len > U8_MAX - 4)) return NULL; sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); if (sdreq == NULL) return NULL; sdreq->tlv_len = uri_len + 3; if (uri[uri_len - 1] == 0) sdreq->tlv_len--; sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL); if (sdreq->tlv == NULL) { kfree(sdreq); return NULL; } sdreq->tlv[0] = LLCP_TLV_SDREQ; sdreq->tlv[1] = sdreq->tlv_len - 2; sdreq->tlv[2] = tid; sdreq->tid = tid; sdreq->uri = sdreq->tlv + 3; memcpy(sdreq->uri, uri, uri_len); sdreq->time = jiffies; INIT_HLIST_NODE(&sdreq->node); return sdreq; } void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp) { kfree(sdp->tlv); kfree(sdp); } void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head) { struct nfc_llcp_sdp_tlv *sdp; struct hlist_node *n; hlist_for_each_entry_safe(sdp, n, head, node) { hlist_del(&sdp->node); nfc_llcp_free_sdp_tlv(sdp); } } int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local, const u8 *tlv_array, u16 tlv_array_len) { const u8 *tlv = tlv_array; u8 type, length, offset = 0; pr_debug("TLV array length %d\n", tlv_array_len); if (local == NULL) return -ENODEV; while (offset < tlv_array_len) { type = tlv[0]; length = tlv[1]; pr_debug("type 0x%x length 
%d\n", type, length); switch (type) { case LLCP_TLV_VERSION: local->remote_version = llcp_tlv_version(tlv); break; case LLCP_TLV_MIUX: local->remote_miu = llcp_tlv_miux(tlv) + 128; break; case LLCP_TLV_WKS: local->remote_wks = llcp_tlv_wks(tlv); break; case LLCP_TLV_LTO: local->remote_lto = llcp_tlv_lto(tlv) * 10; break; case LLCP_TLV_OPT: local->remote_opt = llcp_tlv_opt(tlv); break; default: pr_err("Invalid gt tlv value 0x%x\n", type); break; } offset += length + 2; tlv += length + 2; } pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n", local->remote_version, local->remote_miu, local->remote_lto, local->remote_opt, local->remote_wks); return 0; } int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock, const u8 *tlv_array, u16 tlv_array_len) { const u8 *tlv = tlv_array; u8 type, length, offset = 0; pr_debug("TLV array length %d\n", tlv_array_len); if (sock == NULL) return -ENOTCONN; while (offset < tlv_array_len) { type = tlv[0]; length = tlv[1]; pr_debug("type 0x%x length %d\n", type, length); switch (type) { case LLCP_TLV_MIUX: sock->remote_miu = llcp_tlv_miux(tlv) + 128; break; case LLCP_TLV_RW: sock->remote_rw = llcp_tlv_rw(tlv); break; case LLCP_TLV_SN: break; default: pr_err("Invalid gt tlv value 0x%x\n", type); break; } offset += length + 2; tlv += length + 2; } pr_debug("sock %p rw %d miu %d\n", sock, sock->remote_rw, sock->remote_miu); return 0; } static struct sk_buff *llcp_add_header(struct sk_buff *pdu, u8 dsap, u8 ssap, u8 ptype) { u8 header[2]; pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); header[0] = (u8)((dsap << 2) | (ptype >> 2)); header[1] = (u8)((ptype << 6) | ssap); pr_debug("header 0x%x 0x%x\n", header[0], header[1]); skb_put_data(pdu, header, LLCP_HEADER_SIZE); return pdu; } static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, const u8 *tlv, u8 tlv_length) { /* XXX Add an skb length check */ if (tlv == NULL) return NULL; skb_put_data(pdu, tlv, tlv_length); return pdu; } static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock, u8 cmd, u16 size) { struct sk_buff *skb; int err; if (sock->ssap == 0) return NULL; skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT, size + LLCP_HEADER_SIZE, &err); if (skb == NULL) { pr_err("Could not allocate PDU\n"); return NULL; } skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd); return skb; } int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock) { struct sk_buff *skb; struct nfc_dev *dev; struct nfc_llcp_local *local; local = sock->local; if (local == NULL) return -ENODEV; dev = sock->dev; if (dev == NULL) return -ENODEV; skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0); if (skb == NULL) return -ENOMEM; skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_symm(struct nfc_dev *dev) { struct sk_buff *skb; struct nfc_llcp_local *local; u16 size = 0; int err; local = nfc_llcp_find_local(dev); if (local == NULL) return -ENODEV; size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) { err = -ENOMEM; goto out; } skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM); __net_timestamp(skb); nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); err = nfc_data_exchange(dev, local->target_idx, skb, nfc_llcp_recv, local); out: nfc_llcp_local_put(local); return err; } int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) { struct nfc_llcp_local *local; struct sk_buff *skb; const u8 *service_name_tlv = NULL; const u8 *miux_tlv = NULL; 
const u8 *rw_tlv = NULL; u8 service_name_tlv_length = 0; u8 miux_tlv_length, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; local = sock->local; if (local == NULL) return -ENODEV; if (sock->service_name != NULL) { service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN, sock->service_name, sock->service_name_len, &service_name_tlv_length); if (!service_name_tlv) { err = -ENOMEM; goto error_tlv; } size += service_name_tlv_length; } /* If the socket parameters are not set, use the local ones */ miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ? local->miux : sock->miux; rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); if (!miux_tlv) { err = -ENOMEM; goto error_tlv; } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); if (!rw_tlv) { err = -ENOMEM; goto error_tlv; } size += rw_tlv_length; pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len); skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size); if (skb == NULL) { err = -ENOMEM; goto error_tlv; } llcp_add_tlv(skb, service_name_tlv, service_name_tlv_length); llcp_add_tlv(skb, miux_tlv, miux_tlv_length); llcp_add_tlv(skb, rw_tlv, rw_tlv_length); skb_queue_tail(&local->tx_queue, skb); err = 0; error_tlv: if (err) pr_err("error %d\n", err); kfree(service_name_tlv); kfree(miux_tlv); kfree(rw_tlv); return err; } int nfc_llcp_send_cc(struct nfc_llcp_sock *sock) { struct nfc_llcp_local *local; struct sk_buff *skb; const u8 *miux_tlv = NULL; const u8 *rw_tlv = NULL; u8 miux_tlv_length, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; local = sock->local; if (local == NULL) return -ENODEV; /* If the socket parameters are not set, use the local ones */ miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ? local->miux : sock->miux; rw = sock->rw > LLCP_MAX_RW ? 
local->rw : sock->rw; miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0, &miux_tlv_length); if (!miux_tlv) { err = -ENOMEM; goto error_tlv; } size += miux_tlv_length; rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length); if (!rw_tlv) { err = -ENOMEM; goto error_tlv; } size += rw_tlv_length; skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size); if (skb == NULL) { err = -ENOMEM; goto error_tlv; } llcp_add_tlv(skb, miux_tlv, miux_tlv_length); llcp_add_tlv(skb, rw_tlv, rw_tlv_length); skb_queue_tail(&local->tx_queue, skb); err = 0; error_tlv: if (err) pr_err("error %d\n", err); kfree(miux_tlv); kfree(rw_tlv); return err; } static struct sk_buff *nfc_llcp_allocate_snl(struct nfc_llcp_local *local, size_t tlv_length) { struct sk_buff *skb; struct nfc_dev *dev; u16 size = 0; if (local == NULL) return ERR_PTR(-ENODEV); dev = local->dev; if (dev == NULL) return ERR_PTR(-ENODEV); size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; size += tlv_length; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) return ERR_PTR(-ENOMEM); skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL); return skb; } int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local, struct hlist_head *tlv_list, size_t tlvs_len) { struct nfc_llcp_sdp_tlv *sdp; struct hlist_node *n; struct sk_buff *skb; skb = nfc_llcp_allocate_snl(local, tlvs_len); if (IS_ERR(skb)) return PTR_ERR(skb); hlist_for_each_entry_safe(sdp, n, tlv_list, node) { skb_put_data(skb, sdp->tlv, sdp->tlv_len); hlist_del(&sdp->node); nfc_llcp_free_sdp_tlv(sdp); } skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local, struct hlist_head *tlv_list, size_t tlvs_len) { struct nfc_llcp_sdp_tlv *sdreq; struct hlist_node *n; struct sk_buff *skb; skb = nfc_llcp_allocate_snl(local, tlvs_len); if (IS_ERR(skb)) return PTR_ERR(skb); mutex_lock(&local->sdreq_lock); if (hlist_empty(&local->pending_sdreqs)) mod_timer(&local->sdreq_timer, jiffies + msecs_to_jiffies(3 * local->remote_lto)); hlist_for_each_entry_safe(sdreq, n, tlv_list, node) { pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri); skb_put_data(skb, sdreq->tlv, sdreq->tlv_len); hlist_del(&sdreq->node); hlist_add_head(&sdreq->node, &local->pending_sdreqs); } mutex_unlock(&local->sdreq_lock); skb_queue_tail(&local->tx_queue, skb); return 0; } int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason) { struct sk_buff *skb; struct nfc_dev *dev; u16 size = 1; /* Reason code */ pr_debug("Sending DM reason 0x%x\n", reason); if (local == NULL) return -ENODEV; dev = local->dev; if (dev == NULL) return -ENODEV; size += LLCP_HEADER_SIZE; size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); if (skb == NULL) return -ENOMEM; skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM); skb_put_data(skb, &reason, 1); skb_queue_head(&local->tx_queue, skb); return 0; } int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock, struct msghdr *msg, size_t len) { struct sk_buff *pdu; struct sock *sk = &sock->sk; struct nfc_llcp_local *local; size_t frag_len = 0, remaining_len; u8 *msg_data, *msg_ptr; u16 remote_miu; pr_debug("Send I frame len %zd\n", len); local = sock->local; if (local == NULL) return -ENODEV; /* Remote is ready but has not acknowledged our frames */ if((sock->remote_ready && skb_queue_len(&sock->tx_pending_queue) >= sock->remote_rw && 
skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) { pr_err("Pending queue is full %d frames\n", skb_queue_len(&sock->tx_pending_queue)); return -ENOBUFS; } /* Remote is not ready and we've been queueing enough frames */ if ((!sock->remote_ready && skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) { pr_err("Tx queue is full %d frames\n", skb_queue_len(&sock->tx_queue)); return -ENOBUFS; } msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN); if (msg_data == NULL) return -ENOMEM; if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } remaining_len = len; msg_ptr = msg_data; do { remote_miu = sock->remote_miu > LLCP_MAX_MIU ? LLCP_DEFAULT_MIU : sock->remote_miu; frag_len = min_t(size_t, remote_miu, remaining_len); pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); pdu = llcp_allocate_pdu(sock, LLCP_PDU_I, frag_len + LLCP_SEQUENCE_SIZE); if (pdu == NULL) { kfree(msg_data); return -ENOMEM; } skb_put(pdu, LLCP_SEQUENCE_SIZE); if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); skb_queue_tail(&sock->tx_queue, pdu); lock_sock(sk); nfc_llcp_queue_i_frames(sock); release_sock(sk); remaining_len -= frag_len; msg_ptr += frag_len; } while (remaining_len > 0); kfree(msg_data); return len; } int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, struct msghdr *msg, size_t len) { struct sk_buff *pdu; struct nfc_llcp_local *local; size_t frag_len = 0, remaining_len; u8 *msg_ptr, *msg_data; u16 remote_miu; int err; pr_debug("Send UI frame len %zd\n", len); local = sock->local; if (local == NULL) return -ENODEV; msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN); if (msg_data == NULL) return -ENOMEM; if (memcpy_from_msg(msg_data, msg, len)) { kfree(msg_data); return -EFAULT; } remaining_len = len; msg_ptr = msg_data; do { remote_miu = sock->remote_miu > LLCP_MAX_MIU ? local->remote_miu : sock->remote_miu; frag_len = min_t(size_t, remote_miu, remaining_len); pr_debug("Fragment %zd bytes remaining %zd", frag_len, remaining_len); pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0, frag_len + LLCP_HEADER_SIZE, &err); if (pdu == NULL) { pr_err("Could not allocate PDU (error=%d)\n", err); len -= remaining_len; if (len == 0) len = err; break; } pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI); if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); /* No need to check for the peer RW for UI frames */ skb_queue_tail(&local->tx_queue, pdu); remaining_len -= frag_len; msg_ptr += frag_len; } while (remaining_len > 0); kfree(msg_data); return len; } int nfc_llcp_send_rr(struct nfc_llcp_sock *sock) { struct sk_buff *skb; struct nfc_llcp_local *local; pr_debug("Send rr nr %d\n", sock->recv_n); local = sock->local; if (local == NULL) return -ENODEV; skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE); if (skb == NULL) return -ENOMEM; skb_put(skb, LLCP_SEQUENCE_SIZE); skb->data[2] = sock->recv_n; skb_queue_head(&local->tx_queue, skb); return 0; } |
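/*
 * Illustrative user-space sketch, not part of the LLCP code above: how the
 * two-byte LLCP PDU header packs DSAP (6 bits), PTYPE (4 bits) and SSAP
 * (6 bits), mirroring llcp_add_header(), plus a minimal type/length/value
 * encoder in the spirit of nfc_llcp_build_tlv(). SAP and PDU type values in
 * main() are arbitrary example numbers, not specific protocol constants.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void llcp_pack_header(uint8_t hdr[2], uint8_t dsap, uint8_t ssap, uint8_t ptype)
{
	hdr[0] = (uint8_t)((dsap << 2) | (ptype >> 2));	/* DDDDDDPP */
	hdr[1] = (uint8_t)((ptype << 6) | ssap);	/* PPSSSSSS */
}

static void llcp_unpack_header(const uint8_t hdr[2], uint8_t *dsap, uint8_t *ssap, uint8_t *ptype)
{
	*dsap = hdr[0] >> 2;
	*ptype = (uint8_t)(((hdr[0] & 0x03) << 2) | (hdr[1] >> 6));
	*ssap = hdr[1] & 0x3f;
}

/* Encode one TLV into buf; returns bytes written (2 + length). */
static size_t tlv_encode(uint8_t *buf, uint8_t type, const uint8_t *value, uint8_t length)
{
	buf[0] = type;
	buf[1] = length;
	memcpy(buf + 2, value, length);
	return 2u + length;
}

int main(void)
{
	uint8_t hdr[2], dsap, ssap, ptype;
	uint8_t tlv[8];
	uint8_t miux[2] = { 0x07, 0xff };	/* big-endian MIUX value, 11 bits used */

	llcp_pack_header(hdr, 0x04, 0x20, 0x05);
	llcp_unpack_header(hdr, &dsap, &ssap, &ptype);
	printf("dsap=0x%02x ssap=0x%02x ptype=0x%02x\n", dsap, ssap, ptype);

	size_t n = tlv_encode(tlv, 0x02 /* MIUX, per the length table above */, miux, sizeof(miux));
	printf("tlv: %zu bytes, type=0x%02x len=%u\n", n, tlv[0], (unsigned)tlv[1]);
	return 0;
}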
// SPDX-License-Identifier: GPL-2.0
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/xxhash.h>
#include <linux/unaligned.h>

#define XXHASH64_BLOCK_SIZE	32
#define XXHASH64_DIGEST_SIZE	8

struct xxhash64_tfm_ctx {
	u64 seed;
};

struct xxhash64_desc_ctx {
	struct xxh64_state xxhstate;
};

static int xxhash64_setkey(struct crypto_shash *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(tctx->seed))
		return -EINVAL;
	tctx->seed = get_unaligned_le64(key);
	return 0;
}

static int xxhash64_init(struct shash_desc *desc)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	xxh64_reset(&dctx->xxhstate, tctx->seed);

	return 0;
}

static int xxhash64_update(struct shash_desc *desc, const u8 *data,
			   unsigned int length)
{
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	xxh64_update(&dctx->xxhstate, data, length);

	return 0;
}

static int xxhash64_final(struct shash_desc *desc, u8 *out)
{
	struct xxhash64_desc_ctx *dctx = shash_desc_ctx(desc);

	put_unaligned_le64(xxh64_digest(&dctx->xxhstate), out);

	return 0;
}

static int xxhash64_digest(struct shash_desc *desc, const u8 *data,
			   unsigned int length, u8 *out)
{
	struct xxhash64_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);

	put_unaligned_le64(xxh64(data, length, tctx->seed), out);

	return 0;
}

static struct shash_alg alg = {
	.digestsize	= XXHASH64_DIGEST_SIZE,
	.setkey		= xxhash64_setkey,
	.init		= xxhash64_init,
	.update		= xxhash64_update,
	.final		= xxhash64_final,
	.digest		= xxhash64_digest,
	.descsize	= sizeof(struct xxhash64_desc_ctx),
	.base		= {
		.cra_name	 = "xxhash64",
		.cra_driver_name = "xxhash64-generic",
		.cra_priority	 = 100,
		.cra_flags	 = CRYPTO_ALG_OPTIONAL_KEY,
		.cra_blocksize	 = XXHASH64_BLOCK_SIZE,
		.cra_ctxsize	 = sizeof(struct xxhash64_tfm_ctx),
		.cra_module	 = THIS_MODULE,
	}
};

static int __init xxhash_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit xxhash_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(xxhash_mod_init);
module_exit(xxhash_mod_fini);

MODULE_AUTHOR("Nikolay Borisov <nborisov@suse.com>");
MODULE_DESCRIPTION("xxhash calculations wrapper for lib/xxhash.c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("xxhash64");
MODULE_ALIAS_CRYPTO("xxhash64-generic");
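/*
 * Illustrative in-kernel sketch, not part of the module above: one way a
 * kernel caller might drive the "xxhash64" shash registered here through the
 * generic crypto API. The function name is made up for this example and the
 * error handling is abbreviated; only well-known crypto API calls are used.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/unaligned.h>

static int xxhash64_digest_example(const void *data, unsigned int len,
				   u64 seed, u8 out[8])
{
	struct crypto_shash *tfm;
	u8 seed_key[8];
	int ret;

	tfm = crypto_alloc_shash("xxhash64", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The "key" of this transform is the 64-bit seed, little endian. */
	put_unaligned_le64(seed, seed_key);
	ret = crypto_shash_setkey(tfm, seed_key, sizeof(seed_key));
	if (!ret) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}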
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* zstd_ddict.c :
 * concentrates all logic that needs to know the internals of ZSTD_DDict object */

/*-*******************************************************
*  Dependencies
*********************************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_deps.h"    /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */
#include "../common/cpu.h"          /* bmi2 */
#include "../common/mem.h"          /* low level memory routines */
#define FSE_STATIC_LINKING_ONLY
#include "../common/fse.h"
#include "../common/huf.h"
#include "zstd_decompress_internal.h"
#include "zstd_ddict.h"


/*-*******************************************************
*  Types
*********************************************************/
struct ZSTD_DDict_s {
    void* dictBuffer;
    const void* dictContent;
    size_t dictSize;
    ZSTD_entropyDTables_t entropy;
    U32 dictID;
    U32 entropyPresent;
    ZSTD_customMem cMem;
};  /* typedef'd to ZSTD_DDict within "zstd.h" */

const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict)
{
    assert(ddict != NULL);
    return ddict->dictContent;
}

size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict)
{
    assert(ddict != NULL);
    return ddict->dictSize;
}

void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
{
    DEBUGLOG(4, "ZSTD_copyDDictParameters");
    assert(dctx != NULL);
    assert(ddict != NULL);
    dctx->dictID = ddict->dictID;
    dctx->prefixStart = ddict->dictContent;
    dctx->virtualStart = ddict->dictContent;
    dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
    dctx->previousDstEnd = dctx->dictEnd;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    dctx->dictContentBeginForFuzzing = dctx->prefixStart;
    dctx->dictContentEndForFuzzing = dctx->previousDstEnd;
#endif
    if (ddict->entropyPresent) {
        dctx->litEntropy = 1;
        dctx->fseEntropy = 1;
        dctx->LLTptr = ddict->entropy.LLTable;
        dctx->MLTptr = ddict->entropy.MLTable;
        dctx->OFTptr = ddict->entropy.OFTable;
        dctx->HUFptr = ddict->entropy.hufTable;
        dctx->entropy.rep[0] = ddict->entropy.rep[0];
        dctx->entropy.rep[1] = ddict->entropy.rep[1];
        dctx->entropy.rep[2] = ddict->entropy.rep[2];
    } else {
        dctx->litEntropy = 0;
        dctx->fseEntropy = 0;
    }
}

static size_t
ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict,
                           ZSTD_dictContentType_e dictContentType)
{
ddict->dictID = 0; ddict->entropyPresent = 0; if (dictContentType == ZSTD_dct_rawContent) return 0; if (ddict->dictSize < 8) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } { U32 const magic = MEM_readLE32(ddict->dictContent); if (magic != ZSTD_MAGIC_DICTIONARY) { if (dictContentType == ZSTD_dct_fullDict) return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ return 0; /* pure content mode */ } } ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE); /* load entropy tables */ RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy( &ddict->entropy, ddict->dictContent, ddict->dictSize)), dictionary_corrupted, ""); ddict->entropyPresent = 1; return 0; } static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { ddict->dictBuffer = NULL; ddict->dictContent = dict; if (!dict) dictSize = 0; } else { void* const internalBuffer = ZSTD_customMalloc(dictSize, ddict->cMem); ddict->dictBuffer = internalBuffer; ddict->dictContent = internalBuffer; if (!internalBuffer) return ERROR(memory_allocation); ZSTD_memcpy(internalBuffer, dict, dictSize); } ddict->dictSize = dictSize; ddict->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); return 0; } ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_customMalloc(sizeof(ZSTD_DDict), customMem); if (ddict == NULL) return NULL; ddict->cMem = customMem; { size_t const initResult = ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType); if (ZSTD_isError(initResult)) { ZSTD_freeDDict(ddict); return NULL; } } return ddict; } } /*! ZSTD_createDDict() : * Create a digested dictionary, to start decompression without startup delay. * `dict` content is copied inside DDict. * Consequently, `dict` can be released after `ZSTD_DDict` creation */ ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator); } /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, to start decompression without startup delay. * Dictionary content is simply referenced, it will be accessed during decompression. * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) { ZSTD_customMem const allocator = { NULL, NULL, NULL }; return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator); } const ZSTD_DDict* ZSTD_initStaticDDict( void* sBuffer, size_t sBufferSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { size_t const neededSpace = sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 : dictSize); ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer; assert(sBuffer != NULL); assert(dict != NULL); if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */ if (sBufferSize < neededSpace) return NULL; if (dictLoadMethod == ZSTD_dlm_byCopy) { ZSTD_memcpy(ddict+1, dict, dictSize); /* local copy */ dict = ddict+1; } if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) )) return NULL; return ddict; } size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ { ZSTD_customMem const cMem = ddict->cMem; ZSTD_customFree(ddict->dictBuffer, cMem); ZSTD_customFree(ddict, cMem); return 0; } } /*! ZSTD_estimateDDictSize() : * Estimate amount of memory that will be needed to create a dictionary for decompression. * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) { return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); } size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support sizeof on NULL */ return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; } /*! ZSTD_getDictID_fromDDict() : * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) { if (ddict==NULL) return 0; return ddict->dictID; } |
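/*
 * Illustrative user-space sketch, not part of zstd_ddict.c: what a digested
 * dictionary is for. The DDict is built once (dictionary parsed and entropy
 * tables loaded up front) and then reused across many decompressions.
 * This sketch uses the upstream libzstd API that the code above implements;
 * link with -lzstd, and treat the buffers/sizes as caller-supplied placeholders.
 */
#include <zstd.h>
#include <stdio.h>

static size_t decompress_with_dict(void *dst, size_t dst_cap,
				   const void *src, size_t src_size,
				   const void *dict, size_t dict_size)
{
	ZSTD_DDict *ddict = ZSTD_createDDict(dict, dict_size);	/* dict is copied */
	ZSTD_DCtx *dctx = ZSTD_createDCtx();
	size_t dsize = 0;

	if (ddict && dctx) {
		printf("using dictID %u\n", ZSTD_getDictID_fromDDict(ddict));
		dsize = ZSTD_decompress_usingDDict(dctx, dst, dst_cap,
						   src, src_size, ddict);
		if (ZSTD_isError(dsize)) {
			fprintf(stderr, "decompress: %s\n", ZSTD_getErrorName(dsize));
			dsize = 0;
		}
	}

	/* Both free functions accept NULL, as noted in ZSTD_freeDDict() above. */
	ZSTD_freeDCtx(dctx);
	ZSTD_freeDDict(ddict);
	return dsize;
}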
907 908 909 910 911 912 913 914 | // SPDX-License-Identifier: GPL-2.0 /* * HugeTLB Vmemmap Optimization (HVO) * * Copyright (c) 2020, ByteDance. All rights reserved. * * Author: Muchun Song <songmuchun@bytedance.com> * * See Documentation/mm/vmemmap_dedup.rst */ #define pr_fmt(fmt) "HugeTLB: " fmt #include <linux/pgtable.h> #include <linux/moduleparam.h> #include <linux/bootmem_info.h> #include <linux/mmdebug.h> #include <linux/pagewalk.h> #include <asm/pgalloc.h> #include <asm/tlbflush.h> #include "hugetlb_vmemmap.h" /** * struct vmemmap_remap_walk - walk vmemmap page table * * @remap_pte: called for each lowest-level entry (PTE). * @nr_walked: the number of walked pte. * @reuse_page: the page which is reused for the tail vmemmap pages. * @reuse_addr: the virtual address of the @reuse_page page. * @vmemmap_pages: the list head of the vmemmap pages that can be freed * or is mapped from. * @flags: used to modify behavior in vmemmap page table walking * operations. */ struct vmemmap_remap_walk { void (*remap_pte)(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk); unsigned long nr_walked; struct page *reuse_page; unsigned long reuse_addr; struct list_head *vmemmap_pages; /* Skip the TLB flush when we split the PMD */ #define VMEMMAP_SPLIT_NO_TLB_FLUSH BIT(0) /* Skip the TLB flush when we remap the PTE */ #define VMEMMAP_REMAP_NO_TLB_FLUSH BIT(1) /* synchronize_rcu() to avoid writes from page_ref_add_unless() */ #define VMEMMAP_SYNCHRONIZE_RCU BIT(2) unsigned long flags; }; static int vmemmap_split_pmd(pmd_t *pmd, struct page *head, unsigned long start, struct vmemmap_remap_walk *walk) { pmd_t __pmd; int i; unsigned long addr = start; pte_t *pgtable; pgtable = pte_alloc_one_kernel(&init_mm); if (!pgtable) return -ENOMEM; pmd_populate_kernel(&init_mm, &__pmd, pgtable); for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { pte_t entry, *pte; pgprot_t pgprot = PAGE_KERNEL; entry = mk_pte(head + i, pgprot); pte = pte_offset_kernel(&__pmd, addr); set_pte_at(&init_mm, addr, pte, entry); } spin_lock(&init_mm.page_table_lock); if (likely(pmd_leaf(*pmd))) { /* * Higher order allocations from buddy allocator must be able to * be treated as indepdenent small pages (as they can be freed * individually). */ if (!PageReserved(head)) split_page(head, get_order(PMD_SIZE)); /* Make pte visible before pmd. See comment in pmd_install(). */ smp_wmb(); pmd_populate_kernel(&init_mm, pmd, pgtable); if (!(walk->flags & VMEMMAP_SPLIT_NO_TLB_FLUSH)) flush_tlb_kernel_range(start, start + PMD_SIZE); } else { pte_free_kernel(&init_mm, pgtable); } spin_unlock(&init_mm.page_table_lock); return 0; } static int vmemmap_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { int ret = 0; struct page *head; struct vmemmap_remap_walk *vmemmap_walk = walk->private; /* Only splitting, not remapping the vmemmap pages. */ if (!vmemmap_walk->remap_pte) walk->action = ACTION_CONTINUE; spin_lock(&init_mm.page_table_lock); head = pmd_leaf(*pmd) ? pmd_page(*pmd) : NULL; /* * Due to HugeTLB alignment requirements and the vmemmap * pages being at the start of the hotplugged memory * region in memory_hotplug.memmap_on_memory case. Checking * the vmemmap page associated with the first vmemmap page * if it is self-hosted is sufficient. * * [ hotplugged memory ] * [ section ][...][ section ] * [ vmemmap ][ usable memory ] * ^ | ^ | * +--+ | | * +------------------------+ */ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) && unlikely(!vmemmap_walk->nr_walked)) { struct page *page = head ? 
head + pte_index(addr) : pte_page(ptep_get(pte_offset_kernel(pmd, addr))); if (PageVmemmapSelfHosted(page)) ret = -ENOTSUPP; } spin_unlock(&init_mm.page_table_lock); if (!head || ret) return ret; return vmemmap_split_pmd(pmd, head, addr & PMD_MASK, vmemmap_walk); } static int vmemmap_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { struct vmemmap_remap_walk *vmemmap_walk = walk->private; /* * The reuse_page is found 'first' in page table walking before * starting remapping. */ if (!vmemmap_walk->reuse_page) vmemmap_walk->reuse_page = pte_page(ptep_get(pte)); else vmemmap_walk->remap_pte(pte, addr, vmemmap_walk); vmemmap_walk->nr_walked++; return 0; } static const struct mm_walk_ops vmemmap_remap_ops = { .pmd_entry = vmemmap_pmd_entry, .pte_entry = vmemmap_pte_entry, }; static int vmemmap_remap_range(unsigned long start, unsigned long end, struct vmemmap_remap_walk *walk) { int ret; VM_BUG_ON(!PAGE_ALIGNED(start | end)); mmap_read_lock(&init_mm); ret = walk_kernel_page_table_range(start, end, &vmemmap_remap_ops, NULL, walk); mmap_read_unlock(&init_mm); if (ret) return ret; if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH)) flush_tlb_kernel_range(start, end); return 0; } /* * Free a vmemmap page. A vmemmap page can be allocated from the memblock * allocator or buddy allocator. If the PG_reserved flag is set, it means * that it allocated from the memblock allocator, just free it via the * free_bootmem_page(). Otherwise, use __free_page(). */ static inline void free_vmemmap_page(struct page *page) { if (PageReserved(page)) { memmap_boot_pages_add(-1); free_bootmem_page(page); } else { memmap_pages_add(-1); __free_page(page); } } /* Free a list of the vmemmap pages */ static void free_vmemmap_page_list(struct list_head *list) { struct page *page, *next; list_for_each_entry_safe(page, next, list, lru) free_vmemmap_page(page); } static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk) { /* * Remap the tail pages as read-only to catch illegal write operation * to the tail pages. */ pgprot_t pgprot = PAGE_KERNEL_RO; struct page *page = pte_page(ptep_get(pte)); pte_t entry; /* Remapping the head page requires r/w */ if (unlikely(addr == walk->reuse_addr)) { pgprot = PAGE_KERNEL; list_del(&walk->reuse_page->lru); /* * Makes sure that preceding stores to the page contents from * vmemmap_remap_free() become visible before the set_pte_at() * write. */ smp_wmb(); } entry = mk_pte(walk->reuse_page, pgprot); list_add(&page->lru, walk->vmemmap_pages); set_pte_at(&init_mm, addr, pte, entry); } /* * How many struct page structs need to be reset. When we reuse the head * struct page, the special metadata (e.g. page->flags or page->mapping) * cannot copy to the tail struct page structs. The invalid value will be * checked in the free_tail_page_prepare(). In order to avoid the message * of "corrupted mapping in tail page". We need to reset at least 4 (one * head struct page struct and three tail struct page structs) struct page * structs. 
*/ #define NR_RESET_STRUCT_PAGE 4 static inline void reset_struct_pages(struct page *start) { struct page *from = start + NR_RESET_STRUCT_PAGE; BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page)); memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE); } static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, struct vmemmap_remap_walk *walk) { pgprot_t pgprot = PAGE_KERNEL; struct page *page; void *to; BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page); page = list_first_entry(walk->vmemmap_pages, struct page, lru); list_del(&page->lru); to = page_to_virt(page); copy_page(to, (void *)walk->reuse_addr); reset_struct_pages(to); /* * Makes sure that preceding stores to the page contents become visible * before the set_pte_at() write. */ smp_wmb(); set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot)); } /** * vmemmap_remap_split - split the vmemmap virtual address range [@start, @end) * backing PMDs of the directmap into PTEs * @start: start address of the vmemmap virtual address range that we want * to remap. * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_split(unsigned long start, unsigned long end, unsigned long reuse) { struct vmemmap_remap_walk walk = { .remap_pte = NULL, .flags = VMEMMAP_SPLIT_NO_TLB_FLUSH, }; /* See the comment in the vmemmap_remap_free(). */ BUG_ON(start - reuse != PAGE_SIZE); return vmemmap_remap_range(reuse, end, &walk); } /** * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end) * to the page which @reuse is mapped to, then free vmemmap * which the range are mapped to. * @start: start address of the vmemmap virtual address range that we want * to remap. * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. * @vmemmap_pages: list to deposit vmemmap pages to be freed. It is callers * responsibility to free pages. * @flags: modifications to vmemmap_remap_walk flags * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_free(unsigned long start, unsigned long end, unsigned long reuse, struct list_head *vmemmap_pages, unsigned long flags) { int ret; struct vmemmap_remap_walk walk = { .remap_pte = vmemmap_remap_pte, .reuse_addr = reuse, .vmemmap_pages = vmemmap_pages, .flags = flags, }; int nid = page_to_nid((struct page *)reuse); gfp_t gfp_mask = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; /* * Allocate a new head vmemmap page to avoid breaking a contiguous * block of struct page memory when freeing it back to page allocator * in free_vmemmap_page_list(). This will allow the likely contiguous * struct page backing memory to be kept contiguous and allowing for * more allocations of hugepages. Fallback to the currently * mapped head page in case should it fail to allocate. */ walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0); if (walk.reuse_page) { copy_page(page_to_virt(walk.reuse_page), (void *)walk.reuse_addr); list_add(&walk.reuse_page->lru, vmemmap_pages); memmap_pages_add(1); } /* * In order to make remapping routine most efficient for the huge pages, * the routine of vmemmap page table walking has the following rules * (see more details from the vmemmap_pte_range()): * * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE) * should be continuous. 
* - The @reuse address is part of the range [@reuse, @end) that we are * walking which is passed to vmemmap_remap_range(). * - The @reuse address is the first in the complete range. * * So we need to make sure that @start and @reuse meet the above rules. */ BUG_ON(start - reuse != PAGE_SIZE); ret = vmemmap_remap_range(reuse, end, &walk); if (ret && walk.nr_walked) { end = reuse + walk.nr_walked * PAGE_SIZE; /* * vmemmap_pages contains pages from the previous * vmemmap_remap_range call which failed. These * are pages which were removed from the vmemmap. * They will be restored in the following call. */ walk = (struct vmemmap_remap_walk) { .remap_pte = vmemmap_restore_pte, .reuse_addr = reuse, .vmemmap_pages = vmemmap_pages, .flags = 0, }; vmemmap_remap_range(reuse, end, &walk); } return ret; } static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, struct list_head *list) { gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL; unsigned long nr_pages = (end - start) >> PAGE_SHIFT; int nid = page_to_nid((struct page *)start); struct page *page, *next; int i; for (i = 0; i < nr_pages; i++) { page = alloc_pages_node(nid, gfp_mask, 0); if (!page) goto out; list_add(&page->lru, list); } memmap_pages_add(nr_pages); return 0; out: list_for_each_entry_safe(page, next, list, lru) __free_page(page); return -ENOMEM; } /** * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end) * to the page which is from the @vmemmap_pages * respectively. * @start: start address of the vmemmap virtual address range that we want * to remap. * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. * @flags: modifications to vmemmap_remap_walk flags * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, unsigned long reuse, unsigned long flags) { LIST_HEAD(vmemmap_pages); struct vmemmap_remap_walk walk = { .remap_pte = vmemmap_restore_pte, .reuse_addr = reuse, .vmemmap_pages = &vmemmap_pages, .flags = flags, }; /* See the comment in the vmemmap_remap_free(). */ BUG_ON(start - reuse != PAGE_SIZE); if (alloc_vmemmap_page_list(start, end, &vmemmap_pages)) return -ENOMEM; return vmemmap_remap_range(reuse, end, &walk); } DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key); static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON); static int __init hugetlb_vmemmap_optimize_param(char *buf) { return kstrtobool(buf, &vmemmap_optimize_enabled); } early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_optimize_param); static int __hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio, unsigned long flags) { int ret; unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; unsigned long vmemmap_reuse; VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio); if (!folio_test_hugetlb_vmemmap_optimized(folio)) return 0; if (flags & VMEMMAP_SYNCHRONIZE_RCU) synchronize_rcu(); vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); vmemmap_reuse = vmemmap_start; vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* * The pages which the vmemmap virtual address range [@vmemmap_start, * @vmemmap_end) are mapped to are freed to the buddy allocator, and * the range is mapped to the page which @vmemmap_reuse is mapped to. 
* When a HugeTLB page is freed to the buddy allocator, previously * discarded vmemmap pages must be allocated and remapping. */ ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, flags); if (!ret) { folio_clear_hugetlb_vmemmap_optimized(folio); static_branch_dec(&hugetlb_optimize_vmemmap_key); } return ret; } /** * hugetlb_vmemmap_restore_folio - restore previously optimized (by * hugetlb_vmemmap_optimize_folio()) vmemmap pages which * will be reallocated and remapped. * @h: struct hstate. * @folio: the folio whose vmemmap pages will be restored. * * Return: %0 if @folio's vmemmap pages have been reallocated and remapped, * negative error code otherwise. */ int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio) { return __hugetlb_vmemmap_restore_folio(h, folio, VMEMMAP_SYNCHRONIZE_RCU); } /** * hugetlb_vmemmap_restore_folios - restore vmemmap for every folio on the list. * @h: hstate. * @folio_list: list of folios. * @non_hvo_folios: Output list of folios for which vmemmap exists. * * Return: number of folios for which vmemmap was restored, or an error code * if an error was encountered restoring vmemmap for a folio. * Folios that have vmemmap are moved to the non_hvo_folios * list. Processing of entries stops when the first error is * encountered. The folio that experienced the error and all * non-processed folios will remain on folio_list. */ long hugetlb_vmemmap_restore_folios(const struct hstate *h, struct list_head *folio_list, struct list_head *non_hvo_folios) { struct folio *folio, *t_folio; long restored = 0; long ret = 0; unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU; list_for_each_entry_safe(folio, t_folio, folio_list, lru) { if (folio_test_hugetlb_vmemmap_optimized(folio)) { ret = __hugetlb_vmemmap_restore_folio(h, folio, flags); /* only need to synchronize_rcu() once for each batch */ flags &= ~VMEMMAP_SYNCHRONIZE_RCU; if (ret) break; restored++; } /* Add non-optimized folios to output list */ list_move(&folio->lru, non_hvo_folios); } if (restored) flush_tlb_all(); if (!ret) ret = restored; return ret; } /* Return true iff a HugeTLB whose vmemmap should and can be optimized. */ static bool vmemmap_should_optimize_folio(const struct hstate *h, struct folio *folio) { if (folio_test_hugetlb_vmemmap_optimized(folio)) return false; if (!READ_ONCE(vmemmap_optimize_enabled)) return false; if (!hugetlb_vmemmap_optimizable(h)) return false; return true; } static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio, struct list_head *vmemmap_pages, unsigned long flags) { int ret = 0; unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; unsigned long vmemmap_reuse; VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_ref_count(folio), folio); if (!vmemmap_should_optimize_folio(h, folio)) return ret; static_branch_inc(&hugetlb_optimize_vmemmap_key); if (flags & VMEMMAP_SYNCHRONIZE_RCU) synchronize_rcu(); /* * Very Subtle * If VMEMMAP_REMAP_NO_TLB_FLUSH is set, TLB flushing is not performed * immediately after remapping. As a result, subsequent accesses * and modifications to struct pages associated with the hugetlb * page could be to the OLD struct pages. Set the vmemmap optimized * flag here so that it is copied to the new head page. This keeps * the old and new struct pages in sync. * If there is an error during optimization, we will immediately FLUSH * the TLB and clear the flag below. 
*/ folio_set_hugetlb_vmemmap_optimized(folio); vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); vmemmap_reuse = vmemmap_start; vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end) * to the page which @vmemmap_reuse is mapped to. Add pages previously * mapping the range to vmemmap_pages list so that they can be freed by * the caller. */ ret = vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse, vmemmap_pages, flags); if (ret) { static_branch_dec(&hugetlb_optimize_vmemmap_key); folio_clear_hugetlb_vmemmap_optimized(folio); } return ret; } /** * hugetlb_vmemmap_optimize_folio - optimize @folio's vmemmap pages. * @h: struct hstate. * @folio: the folio whose vmemmap pages will be optimized. * * This function only tries to optimize @folio's vmemmap pages and does not * guarantee that the optimization will succeed after it returns. The caller * can use folio_test_hugetlb_vmemmap_optimized(@folio) to detect if @folio's * vmemmap pages have been optimized. */ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio) { LIST_HEAD(vmemmap_pages); __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, VMEMMAP_SYNCHRONIZE_RCU); free_vmemmap_page_list(&vmemmap_pages); } static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio) { unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end; unsigned long vmemmap_reuse; if (!vmemmap_should_optimize_folio(h, folio)) return 0; vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h); vmemmap_reuse = vmemmap_start; vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE; /* * Split PMDs on the vmemmap virtual address range [@vmemmap_start, * @vmemmap_end] */ return vmemmap_remap_split(vmemmap_start, vmemmap_end, vmemmap_reuse); } static void __hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list, bool boot) { struct folio *folio; int nr_to_optimize; LIST_HEAD(vmemmap_pages); unsigned long flags = VMEMMAP_REMAP_NO_TLB_FLUSH | VMEMMAP_SYNCHRONIZE_RCU; nr_to_optimize = 0; list_for_each_entry(folio, folio_list, lru) { int ret; unsigned long spfn, epfn; if (boot && folio_test_hugetlb_vmemmap_optimized(folio)) { /* * Already optimized by pre-HVO, just map the * mirrored tail page structs RO. */ spfn = (unsigned long)&folio->page; epfn = spfn + pages_per_huge_page(h); vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio), HUGETLB_VMEMMAP_RESERVE_SIZE); register_page_bootmem_memmap(pfn_to_section_nr(spfn), &folio->page, HUGETLB_VMEMMAP_RESERVE_SIZE); static_branch_inc(&hugetlb_optimize_vmemmap_key); continue; } nr_to_optimize++; ret = hugetlb_vmemmap_split_folio(h, folio); /* * Spliting the PMD requires allocating a page, thus lets fail * early once we encounter the first OOM. No point in retrying * as it can be dynamically done on remap with the memory * we get back from the vmemmap deduplication. */ if (ret == -ENOMEM) break; } if (!nr_to_optimize) /* * All pre-HVO folios, nothing left to do. It's ok if * there is a mix of pre-HVO and not yet HVO-ed folios * here, as __hugetlb_vmemmap_optimize_folio() will * skip any folios that already have the optimized flag * set, see vmemmap_should_optimize_folio(). */ goto out; flush_tlb_all(); list_for_each_entry(folio, folio_list, lru) { int ret; ret = __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags); /* only need to synchronize_rcu() once for each batch */ flags &= ~VMEMMAP_SYNCHRONIZE_RCU; /* * Pages to be freed may have been accumulated. 
If we * encounter an ENOMEM, free what we have and try again. * This can occur in the case that both splitting fails * halfway and head page allocation also fails. In this * case __hugetlb_vmemmap_optimize_folio() would free memory * allowing more vmemmap remaps to occur. */ if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) { flush_tlb_all(); free_vmemmap_page_list(&vmemmap_pages); INIT_LIST_HEAD(&vmemmap_pages); __hugetlb_vmemmap_optimize_folio(h, folio, &vmemmap_pages, flags); } } out: flush_tlb_all(); free_vmemmap_page_list(&vmemmap_pages); } void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list) { __hugetlb_vmemmap_optimize_folios(h, folio_list, false); } void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list) { __hugetlb_vmemmap_optimize_folios(h, folio_list, true); } #ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT /* Return true if a bootmem allocated HugeTLB page should be pre-HVO-ed */ static bool vmemmap_should_optimize_bootmem_page(struct huge_bootmem_page *m) { unsigned long section_size, psize, pmd_vmemmap_size; phys_addr_t paddr; if (!READ_ONCE(vmemmap_optimize_enabled)) return false; if (!hugetlb_vmemmap_optimizable(m->hstate)) return false; psize = huge_page_size(m->hstate); paddr = virt_to_phys(m); /* * Pre-HVO only works if the bootmem huge page * is aligned to the section size. */ section_size = (1UL << PA_SECTION_SHIFT); if (!IS_ALIGNED(paddr, section_size) || !IS_ALIGNED(psize, section_size)) return false; /* * The pre-HVO code does not deal with splitting PMDs, * so the bootmem page must be aligned to the number * of base pages that can be mapped with one vmemmap PMD. */ pmd_vmemmap_size = (PMD_SIZE / (sizeof(struct page))) << PAGE_SHIFT; if (!IS_ALIGNED(paddr, pmd_vmemmap_size) || !IS_ALIGNED(psize, pmd_vmemmap_size)) return false; return true; } /* * Initialize memmap section for a gigantic page, HVO-style. */ void __init hugetlb_vmemmap_init_early(int nid) { unsigned long psize, paddr, section_size; unsigned long ns, i, pnum, pfn, nr_pages; unsigned long start, end; struct huge_bootmem_page *m = NULL; void *map; /* * Nothing to do if bootmem pages were not allocated * early in boot, or if HVO wasn't enabled in the * first place.
*/ if (!hugetlb_bootmem_allocated()) return; if (!READ_ONCE(vmemmap_optimize_enabled)) return; section_size = (1UL << PA_SECTION_SHIFT); list_for_each_entry(m, &huge_boot_pages[nid], list) { if (!vmemmap_should_optimize_bootmem_page(m)) continue; nr_pages = pages_per_huge_page(m->hstate); psize = nr_pages << PAGE_SHIFT; paddr = virt_to_phys(m); pfn = PHYS_PFN(paddr); map = pfn_to_page(pfn); start = (unsigned long)map; end = start + nr_pages * sizeof(struct page); if (vmemmap_populate_hvo(start, end, nid, HUGETLB_VMEMMAP_RESERVE_SIZE) < 0) continue; memmap_boot_pages_add(HUGETLB_VMEMMAP_RESERVE_SIZE / PAGE_SIZE); pnum = pfn_to_section_nr(pfn); ns = psize / section_size; for (i = 0; i < ns; i++) { sparse_init_early_section(nid, map, pnum, SECTION_IS_VMEMMAP_PREINIT); map += section_map_size(); pnum++; } m->flags |= HUGE_BOOTMEM_HVO; } } void __init hugetlb_vmemmap_init_late(int nid) { struct huge_bootmem_page *m, *tm; unsigned long phys, nr_pages, start, end; unsigned long pfn, nr_mmap; struct hstate *h; void *map; if (!hugetlb_bootmem_allocated()) return; if (!READ_ONCE(vmemmap_optimize_enabled)) return; list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) { if (!(m->flags & HUGE_BOOTMEM_HVO)) continue; phys = virt_to_phys(m); h = m->hstate; pfn = PHYS_PFN(phys); nr_pages = pages_per_huge_page(h); if (!hugetlb_bootmem_page_zones_valid(nid, m)) { /* * Oops, the hugetlb page spans multiple zones. * Remove it from the list, and undo HVO. */ list_del(&m->list); map = pfn_to_page(pfn); start = (unsigned long)map; end = start + nr_pages * sizeof(struct page); vmemmap_undo_hvo(start, end, nid, HUGETLB_VMEMMAP_RESERVE_SIZE); nr_mmap = end - start - HUGETLB_VMEMMAP_RESERVE_SIZE; memmap_boot_pages_add(DIV_ROUND_UP(nr_mmap, PAGE_SIZE)); memblock_phys_free(phys, huge_page_size(h)); continue; } else m->flags |= HUGE_BOOTMEM_ZONES_VALID; } } #endif static const struct ctl_table hugetlb_vmemmap_sysctls[] = { { .procname = "hugetlb_optimize_vmemmap", .data = &vmemmap_optimize_enabled, .maxlen = sizeof(vmemmap_optimize_enabled), .mode = 0644, .proc_handler = proc_dobool, }, }; static int __init hugetlb_vmemmap_init(void) { const struct hstate *h; /* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */ BUILD_BUG_ON(__NR_USED_SUBPAGE > HUGETLB_VMEMMAP_RESERVE_PAGES); for_each_hstate(h) { if (hugetlb_vmemmap_optimizable(h)) { register_sysctl_init("vm", hugetlb_vmemmap_sysctls); break; } } return 0; } late_initcall(hugetlb_vmemmap_init); |
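A minimal caller-side sketch of the batch restore contract documented above, assuming an already-populated folio_list; example_restore_batch() is a hypothetical helper and not part of this file.

static long example_restore_batch(const struct hstate *h, struct list_head *folio_list)
{
	LIST_HEAD(non_hvo_folios);
	long ret;

	/*
	 * Returns the number of folios whose vmemmap was reallocated, or a
	 * negative error code; unprocessed folios stay on folio_list.
	 */
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0)
		return ret;

	/* Processed folios, now with full vmemmap, were moved to non_hvo_folios. */
	return ret;
}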
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Driver for 8250/16550-type serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright (C) 2001 Russell King.
*/ #include <linux/bits.h> #include <linux/serial_8250.h> #include <linux/serial_core.h> #include <linux/dmaengine.h> #include "../serial_mctrl_gpio.h" struct uart_8250_dma { int (*tx_dma)(struct uart_8250_port *p); int (*rx_dma)(struct uart_8250_port *p); void (*prepare_tx_dma)(struct uart_8250_port *p); void (*prepare_rx_dma)(struct uart_8250_port *p); /* Filter function */ dma_filter_fn fn; /* Parameter to the filter function */ void *rx_param; void *tx_param; struct dma_slave_config rxconf; struct dma_slave_config txconf; struct dma_chan *rxchan; struct dma_chan *txchan; /* Device address base for DMA operations */ phys_addr_t rx_dma_addr; phys_addr_t tx_dma_addr; /* DMA address of the buffer in memory */ dma_addr_t rx_addr; dma_addr_t tx_addr; dma_cookie_t rx_cookie; dma_cookie_t tx_cookie; void *rx_buf; size_t rx_size; size_t tx_size; unsigned char tx_running; unsigned char tx_err; unsigned char rx_running; }; struct old_serial_port { unsigned int uart; unsigned int baud_base; unsigned int port; unsigned int irq; upf_t flags; unsigned char io_type; unsigned char __iomem *iomem_base; unsigned short iomem_reg_shift; }; struct serial8250_config { const char *name; unsigned short fifo_size; unsigned short tx_loadsz; unsigned char fcr; unsigned char rxtrig_bytes[UART_FCR_R_TRIG_MAX_STATE]; unsigned int flags; }; #define UART_CAP_FIFO BIT(8) /* UART has FIFO */ #define UART_CAP_EFR BIT(9) /* UART has EFR */ #define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */ #define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */ #define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */ #define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */ #define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */ #define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */ #define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */ #define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks: * STOP PARITY EPAR SPAR WLEN5 WLEN6 */ #define UART_CAP_NOTEMT BIT(18) /* UART without interrupt on TEMT available */ #define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */ #define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */ #define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */ #define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */ #define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */ /* Module parameters */ #define UART_NR CONFIG_SERIAL_8250_NR_UARTS extern unsigned int nr_uarts; #ifdef CONFIG_SERIAL_8250_SHARE_IRQ #define SERIAL8250_SHARE_IRQS 1 #else #define SERIAL8250_SHARE_IRQS 0 #endif extern unsigned int share_irqs; extern unsigned int skip_txen_test; #define SERIAL8250_PORT_FLAGS(_base, _irq, _flags) \ { \ .iobase = _base, \ .irq = _irq, \ .uartclk = 1843200, \ .iotype = UPIO_PORT, \ .flags = UPF_BOOT_AUTOCONF | (_flags), \ } #define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0) extern struct uart_driver serial8250_reg; void serial8250_register_ports(struct uart_driver *drv, struct device *dev); /* Legacy ISA bus related APIs */ typedef void (*serial8250_isa_config_fn)(int, struct uart_port *, u32 *); extern serial8250_isa_config_fn serial8250_isa_config; void serial8250_isa_init_ports(void); extern struct platform_device *serial8250_isa_devs; extern const struct uart_ops *univ8250_port_base_ops; extern struct uart_ops univ8250_port_ops; static inline int serial_in(struct uart_8250_port *up, int offset) { return up->port.serial_in(&up->port, offset); } static inline void 
serial_out(struct uart_8250_port *up, int offset, int value) { up->port.serial_out(&up->port, offset, value); } /** * serial_lsr_in - Read LSR register and preserve flags across reads * @up: uart 8250 port * * Read LSR register and handle saving non-preserved flags across reads. * The flags that are not preserved across reads are stored into * up->lsr_saved_flags. * * Returns LSR value or'ed with the preserved flags (if any). */ static inline u16 serial_lsr_in(struct uart_8250_port *up) { u16 lsr = up->lsr_saved_flags; lsr |= serial_in(up, UART_LSR); up->lsr_saved_flags = lsr & up->lsr_save_mask; return lsr; } /* * For the 16C950 */ static void serial_icr_write(struct uart_8250_port *up, int offset, int value) { serial_out(up, UART_SCR, offset); serial_out(up, UART_ICR, value); } static unsigned int __maybe_unused serial_icr_read(struct uart_8250_port *up, int offset) { unsigned int value; serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD); serial_out(up, UART_SCR, offset); value = serial_in(up, UART_ICR); serial_icr_write(up, UART_ACR, up->acr); return value; } void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p); static inline u32 serial_dl_read(struct uart_8250_port *up) { return up->dl_read(up); } static inline void serial_dl_write(struct uart_8250_port *up, u32 value) { up->dl_write(up, value); } static inline bool serial8250_set_THRI(struct uart_8250_port *up) { /* Port locked to synchronize UART_IER access against the console. */ lockdep_assert_held_once(&up->port.lock); if (up->ier & UART_IER_THRI) return false; up->ier |= UART_IER_THRI; serial_out(up, UART_IER, up->ier); return true; } static inline bool serial8250_clear_THRI(struct uart_8250_port *up) { /* Port locked to synchronize UART_IER access against the console. */ lockdep_assert_held_once(&up->port.lock); if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; serial_out(up, UART_IER, up->ier); return true; } struct uart_8250_port *serial8250_setup_port(int index); struct uart_8250_port *serial8250_get_port(int line); int serial8250_em485_config(struct uart_port *port, struct ktermios *termios, struct serial_rs485 *rs485); void serial8250_em485_start_tx(struct uart_8250_port *p, bool toggle_ier); void serial8250_em485_stop_tx(struct uart_8250_port *p, bool toggle_ier); void serial8250_em485_destroy(struct uart_8250_port *p); extern struct serial_rs485 serial8250_em485_supported; /* MCR <-> TIOCM conversion */ static inline int serial8250_TIOCM_to_MCR(int tiocm) { int mcr = 0; if (tiocm & TIOCM_RTS) mcr |= UART_MCR_RTS; if (tiocm & TIOCM_DTR) mcr |= UART_MCR_DTR; if (tiocm & TIOCM_OUT1) mcr |= UART_MCR_OUT1; if (tiocm & TIOCM_OUT2) mcr |= UART_MCR_OUT2; if (tiocm & TIOCM_LOOP) mcr |= UART_MCR_LOOP; return mcr; } static inline int serial8250_MCR_to_TIOCM(int mcr) { int tiocm = 0; if (mcr & UART_MCR_RTS) tiocm |= TIOCM_RTS; if (mcr & UART_MCR_DTR) tiocm |= TIOCM_DTR; if (mcr & UART_MCR_OUT1) tiocm |= TIOCM_OUT1; if (mcr & UART_MCR_OUT2) tiocm |= TIOCM_OUT2; if (mcr & UART_MCR_LOOP) tiocm |= TIOCM_LOOP; return tiocm; } /* MSR <-> TIOCM conversion */ static inline int serial8250_MSR_to_TIOCM(int msr) { int tiocm = 0; if (msr & UART_MSR_DCD) tiocm |= TIOCM_CAR; if (msr & UART_MSR_RI) tiocm |= TIOCM_RNG; if (msr & UART_MSR_DSR) tiocm |= TIOCM_DSR; if (msr & UART_MSR_CTS) tiocm |= TIOCM_CTS; return tiocm; } static inline void serial8250_out_MCR(struct uart_8250_port *up, int value) { serial_out(up, UART_MCR, value); if (up->gpios) mctrl_gpio_set(up->gpios, serial8250_MCR_to_TIOCM(value)); } 
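A hedged sketch of how the THRI helpers above are meant to be called with the port lock held, which is the requirement the lockdep assertions document; example_kick_tx() is hypothetical and not part of this header.

static inline void example_kick_tx(struct uart_8250_port *up)
{
	unsigned long flags;

	/* UART_IER is only touched under the port lock. */
	spin_lock_irqsave(&up->port.lock, flags);
	/* Returns false if THRI was already set, true if it was newly enabled. */
	serial8250_set_THRI(up);
	spin_unlock_irqrestore(&up->port.lock, flags);
}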
static inline int serial8250_in_MCR(struct uart_8250_port *up) { int mctrl; mctrl = serial_in(up, UART_MCR); if (up->gpios) { unsigned int mctrl_gpio = 0; mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio); mctrl |= serial8250_TIOCM_to_MCR(mctrl_gpio); } return mctrl; } #ifdef CONFIG_SERIAL_8250_PNP int serial8250_pnp_init(void); void serial8250_pnp_exit(void); #else static inline int serial8250_pnp_init(void) { return 0; } static inline void serial8250_pnp_exit(void) { } #endif #ifdef CONFIG_SERIAL_8250_RSA void univ8250_rsa_support(struct uart_ops *ops); void rsa_enable(struct uart_8250_port *up); void rsa_disable(struct uart_8250_port *up); void rsa_autoconfig(struct uart_8250_port *up); void rsa_reset(struct uart_8250_port *up); #else static inline void univ8250_rsa_support(struct uart_ops *ops) { } static inline void rsa_enable(struct uart_8250_port *up) {} static inline void rsa_disable(struct uart_8250_port *up) {} static inline void rsa_autoconfig(struct uart_8250_port *up) {} static inline void rsa_reset(struct uart_8250_port *up) {} #endif #ifdef CONFIG_SERIAL_8250_FINTEK int fintek_8250_probe(struct uart_8250_port *uart); #else static inline int fintek_8250_probe(struct uart_8250_port *uart) { return 0; } #endif #ifdef CONFIG_ARCH_OMAP1 #include <linux/soc/ti/omap1-soc.h> static inline int is_omap1_8250(struct uart_8250_port *pt) { int res; switch (pt->port.mapbase) { case OMAP1_UART1_BASE: case OMAP1_UART2_BASE: case OMAP1_UART3_BASE: res = 1; break; default: res = 0; break; } return res; } static inline int is_omap1510_8250(struct uart_8250_port *pt) { if (!cpu_is_omap1510()) return 0; return is_omap1_8250(pt); } #else static inline int is_omap1_8250(struct uart_8250_port *pt) { return 0; } static inline int is_omap1510_8250(struct uart_8250_port *pt) { return 0; } #endif #ifdef CONFIG_SERIAL_8250_DMA extern int serial8250_tx_dma(struct uart_8250_port *); extern void serial8250_tx_dma_flush(struct uart_8250_port *); extern int serial8250_rx_dma(struct uart_8250_port *); extern void serial8250_rx_dma_flush(struct uart_8250_port *); extern int serial8250_request_dma(struct uart_8250_port *); extern void serial8250_release_dma(struct uart_8250_port *); static inline void serial8250_do_prepare_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; if (dma->prepare_tx_dma) dma->prepare_tx_dma(p); } static inline void serial8250_do_prepare_rx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; if (dma->prepare_rx_dma) dma->prepare_rx_dma(p); } static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; return dma && dma->tx_running; } #else static inline int serial8250_tx_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { } static inline int serial8250_rx_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { } static inline int serial8250_request_dma(struct uart_8250_port *p) { return -1; } static inline void serial8250_release_dma(struct uart_8250_port *p) { } static inline bool serial8250_tx_dma_running(struct uart_8250_port *p) { return false; } #endif static inline int ns16550a_goto_highspeed(struct uart_8250_port *up) { unsigned char status; status = serial_in(up, 0x04); /* EXCR2 */ #define PRESL(x) ((x) & 0x30) if (PRESL(status) == 0x10) { /* already in high speed mode */ return 0; } else { status &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */ status |= 0x10; /* 
1.625 divisor for baud_base --> 921600 */ serial_out(up, 0x04, status); } return 1; } static inline int serial_index(struct uart_port *port) { return port->minor - 64; } |
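An illustrative use of the conversion helpers above to assert RTS and DTR on a port; example_assert_rts_dtr() is hypothetical and not part of this header.

static inline void example_assert_rts_dtr(struct uart_8250_port *up)
{
	int mcr = serial8250_TIOCM_to_MCR(TIOCM_RTS | TIOCM_DTR);

	/* Writes UART_MCR and mirrors the bits to any modem-control GPIOs. */
	serial8250_out_MCR(up, mcr);
}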
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved.
* */ #include <linux/bio.h> #include <linux/bitmap.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/pagemap.h> #include <linux/refcount.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/zstd.h> #include "misc.h" #include "fs.h" #include "btrfs_inode.h" #include "compression.h" #include "super.h" #define ZSTD_BTRFS_MAX_WINDOWLOG 17 #define ZSTD_BTRFS_MAX_INPUT (1U << ZSTD_BTRFS_MAX_WINDOWLOG) #define ZSTD_BTRFS_DEFAULT_LEVEL 3 #define ZSTD_BTRFS_MIN_LEVEL -15 #define ZSTD_BTRFS_MAX_LEVEL 15 /* 307s to avoid pathologically clashing with transaction commit */ #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ) static zstd_parameters zstd_get_btrfs_parameters(int level, size_t src_len) { zstd_parameters params = zstd_get_params(level, src_len); if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); return params; } struct workspace { void *mem; size_t size; char *buf; int level; int req_level; unsigned long last_used; /* jiffies */ struct list_head list; struct list_head lru_list; zstd_in_buffer in_buf; zstd_out_buffer out_buf; zstd_parameters params; }; /* * Zstd Workspace Management * * Zstd workspaces have different memory requirements depending on the level. * The zstd workspaces are managed by having individual lists for each level * and a global lru. Forward progress is maintained by protecting a max level * workspace. * * Getting a workspace is done by using the bitmap to identify the levels that * have available workspaces and scans up. This lets us recycle higher level * workspaces because of the monotonic memory guarantee. A workspace's * last_used is only updated if it is being used by the corresponding memory * level. Putting a workspace involves adding it back to the appropriate places * and adding it back to the lru if necessary. * * A timer is used to reclaim workspaces if they have not been used for * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around. * The upper bound is provided by the workqueue limit which is 2 (percpu limit). */ struct zstd_workspace_manager { const struct btrfs_compress_op *ops; spinlock_t lock; struct list_head lru_list; struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL]; unsigned long active_map; wait_queue_head_t wait; struct timer_list timer; }; static struct zstd_workspace_manager wsm; static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL]; static inline struct workspace *list_to_workspace(struct list_head *list) { return container_of(list, struct workspace, list); } static inline int clip_level(int level) { return max(0, level - 1); } /* * Timer callback to free unused workspaces. * * @t: timer * * This scans the lru_list and attempts to reclaim any workspace that hasn't * been used for ZSTD_BTRFS_RECLAIM_JIFFIES. * * The context is softirq and does not need the _bh locking primitives. 
*/ static void zstd_reclaim_timer_fn(struct timer_list *timer) { unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES; struct list_head *pos, *next; ASSERT(timer == &wsm.timer); spin_lock(&wsm.lock); if (list_empty(&wsm.lru_list)) { spin_unlock(&wsm.lock); return; } list_for_each_prev_safe(pos, next, &wsm.lru_list) { struct workspace *victim = container_of(pos, struct workspace, lru_list); int level; if (time_after(victim->last_used, reclaim_threshold)) break; /* workspace is in use */ if (victim->req_level) continue; level = victim->level; list_del(&victim->lru_list); list_del(&victim->list); zstd_free_workspace(&victim->list); if (list_empty(&wsm.idle_ws[level])) clear_bit(level, &wsm.active_map); } if (!list_empty(&wsm.lru_list)) mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES); spin_unlock(&wsm.lock); } /* * Calculate monotonic memory bounds. * * It is possible based on the level configurations that a higher level * workspace uses less memory than a lower level workspace. In order to reuse * workspaces, this must be made a monotonic relationship. This precomputes * the required memory for each level and enforces the monotonicity between * level and memory required. */ static void zstd_calc_ws_mem_sizes(void) { size_t max_size = 0; int level; for (level = ZSTD_BTRFS_MIN_LEVEL; level <= ZSTD_BTRFS_MAX_LEVEL; level++) { if (level == 0) continue; zstd_parameters params = zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT); size_t level_size = max_t(size_t, zstd_cstream_workspace_bound(¶ms.cParams), zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT)); max_size = max_t(size_t, max_size, level_size); /* Use level 1 workspace size for all the fast mode negative levels. */ zstd_ws_mem_sizes[clip_level(level)] = max_size; } } void zstd_init_workspace_manager(void) { struct list_head *ws; int i; zstd_calc_ws_mem_sizes(); wsm.ops = &btrfs_zstd_compress; spin_lock_init(&wsm.lock); init_waitqueue_head(&wsm.wait); timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0); INIT_LIST_HEAD(&wsm.lru_list); for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) INIT_LIST_HEAD(&wsm.idle_ws[i]); ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL); if (IS_ERR(ws)) { btrfs_warn(NULL, "cannot preallocate zstd compression workspace"); } else { set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map); list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]); } } void zstd_cleanup_workspace_manager(void) { struct workspace *workspace; int i; spin_lock_bh(&wsm.lock); for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) { while (!list_empty(&wsm.idle_ws[i])) { workspace = container_of(wsm.idle_ws[i].next, struct workspace, list); list_del(&workspace->list); list_del(&workspace->lru_list); zstd_free_workspace(&workspace->list); } } spin_unlock_bh(&wsm.lock); timer_delete_sync(&wsm.timer); } /* * Find workspace for given level. * * @level: compression level * * This iterates over the set bits in the active_map beginning at the requested * compression level. This lets us utilize already allocated workspaces before * allocating a new one. If the workspace is of a larger size, it is used, but * the place in the lru_list and last_used times are not updated. This is to * offer the opportunity to reclaim the workspace in favor of allocating an * appropriately sized one in the future. 
*/ static struct list_head *zstd_find_workspace(int level) { struct list_head *ws; struct workspace *workspace; int i = clip_level(level); spin_lock_bh(&wsm.lock); for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) { if (!list_empty(&wsm.idle_ws[i])) { ws = wsm.idle_ws[i].next; workspace = list_to_workspace(ws); list_del_init(ws); /* keep its place if it's a lower level using this */ workspace->req_level = level; if (clip_level(level) == workspace->level) list_del(&workspace->lru_list); if (list_empty(&wsm.idle_ws[i])) clear_bit(i, &wsm.active_map); spin_unlock_bh(&wsm.lock); return ws; } } spin_unlock_bh(&wsm.lock); return NULL; } /* * Zstd get_workspace for level. * * @level: compression level * * If @level is 0, then any compression level can be used. Therefore, we begin * scanning from 1. We first scan through possible workspaces and then after * attempt to allocate a new workspace. If we fail to allocate one due to * memory pressure, go to sleep waiting for the max level workspace to free up. */ struct list_head *zstd_get_workspace(int level) { struct list_head *ws; unsigned int nofs_flag; /* level == 0 means we can use any workspace */ if (!level) level = 1; again: ws = zstd_find_workspace(level); if (ws) return ws; nofs_flag = memalloc_nofs_save(); ws = zstd_alloc_workspace(level); memalloc_nofs_restore(nofs_flag); if (IS_ERR(ws)) { DEFINE_WAIT(wait); prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE); schedule(); finish_wait(&wsm.wait, &wait); goto again; } return ws; } /* * Zstd put_workspace. * * @ws: list_head for the workspace * * When putting back a workspace, we only need to update the LRU if we are of * the requested compression level. Here is where we continue to protect the * max level workspace or update last_used accordingly. If the reclaim timer * isn't set, it is also set here. Only the max level workspace tries and wakes * up waiting workspaces. */ void zstd_put_workspace(struct list_head *ws) { struct workspace *workspace = list_to_workspace(ws); spin_lock_bh(&wsm.lock); /* A node is only taken off the lru if we are the corresponding level */ if (clip_level(workspace->req_level) == workspace->level) { /* Hide a max level workspace from reclaim */ if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) { INIT_LIST_HEAD(&workspace->lru_list); } else { workspace->last_used = jiffies; list_add(&workspace->lru_list, &wsm.lru_list); if (!timer_pending(&wsm.timer)) mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES); } } set_bit(workspace->level, &wsm.active_map); list_add(&workspace->list, &wsm.idle_ws[workspace->level]); workspace->req_level = 0; spin_unlock_bh(&wsm.lock); if (workspace->level == clip_level(ZSTD_BTRFS_MAX_LEVEL)) cond_wake_up(&wsm.wait); } void zstd_free_workspace(struct list_head *ws) { struct workspace *workspace = list_entry(ws, struct workspace, list); kvfree(workspace->mem); kfree(workspace->buf); kfree(workspace); } struct list_head *zstd_alloc_workspace(int level) { struct workspace *workspace; workspace = kzalloc(sizeof(*workspace), GFP_KERNEL); if (!workspace) return ERR_PTR(-ENOMEM); /* Use level 1 workspace size for all the fast mode negative levels. 
*/ workspace->size = zstd_ws_mem_sizes[clip_level(level)]; workspace->level = clip_level(level); workspace->req_level = level; workspace->last_used = jiffies; workspace->mem = kvmalloc(workspace->size, GFP_KERNEL | __GFP_NOWARN); workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!workspace->mem || !workspace->buf) goto fail; INIT_LIST_HEAD(&workspace->list); INIT_LIST_HEAD(&workspace->lru_list); return &workspace->list; fail: zstd_free_workspace(&workspace->list); return ERR_PTR(-ENOMEM); } int zstd_compress_folios(struct list_head *ws, struct address_space *mapping, u64 start, struct folio **folios, unsigned long *out_folios, unsigned long *total_in, unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); zstd_cstream *stream; int ret = 0; int nr_folios = 0; struct folio *in_folio = NULL; /* The current folio to read. */ struct folio *out_folio = NULL; /* The current folio to write to. */ unsigned long tot_in = 0; unsigned long tot_out = 0; unsigned long len = *total_out; const unsigned long nr_dest_folios = *out_folios; const u64 orig_end = start + len; unsigned long max_out = nr_dest_folios * PAGE_SIZE; unsigned int cur_len; workspace->params = zstd_get_btrfs_parameters(workspace->req_level, len); *out_folios = 0; *total_out = 0; *total_in = 0; /* Initialize the stream */ stream = zstd_init_cstream(&workspace->params, len, workspace->mem, workspace->size); if (unlikely(!stream)) { struct btrfs_inode *inode = BTRFS_I(mapping->host); btrfs_err(inode->root->fs_info, "zstd compression init level %d failed, root %llu inode %llu offset %llu", workspace->req_level, btrfs_root_id(inode->root), btrfs_ino(inode), start); ret = -EIO; goto out; } /* map in the first page of input data */ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio); if (ret < 0) goto out; cur_len = btrfs_calc_input_length(in_folio, orig_end, start); workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start)); workspace->in_buf.pos = 0; workspace->in_buf.size = cur_len; /* Allocate and map in the output buffer */ out_folio = btrfs_alloc_compr_folio(); if (out_folio == NULL) { ret = -ENOMEM; goto out; } folios[nr_folios++] = out_folio; workspace->out_buf.dst = folio_address(out_folio); workspace->out_buf.pos = 0; workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE); while (1) { size_t ret2; ret2 = zstd_compress_stream(stream, &workspace->out_buf, &workspace->in_buf); if (unlikely(zstd_is_error(ret2))) { struct btrfs_inode *inode = BTRFS_I(mapping->host); btrfs_warn(inode->root->fs_info, "zstd compression level %d failed, error %d root %llu inode %llu offset %llu", workspace->req_level, zstd_get_error_code(ret2), btrfs_root_id(inode->root), btrfs_ino(inode), start); ret = -EIO; goto out; } /* Check to see if we are making it bigger */ if (tot_in + workspace->in_buf.pos > 8192 && tot_in + workspace->in_buf.pos < tot_out + workspace->out_buf.pos) { ret = -E2BIG; goto out; } /* We've reached the end of our output range */ if (workspace->out_buf.pos >= max_out) { tot_out += workspace->out_buf.pos; ret = -E2BIG; goto out; } /* Check if we need more output space */ if (workspace->out_buf.pos == workspace->out_buf.size) { tot_out += PAGE_SIZE; max_out -= PAGE_SIZE; if (nr_folios == nr_dest_folios) { ret = -E2BIG; goto out; } out_folio = btrfs_alloc_compr_folio(); if (out_folio == NULL) { ret = -ENOMEM; goto out; } folios[nr_folios++] = out_folio; workspace->out_buf.dst = folio_address(out_folio); workspace->out_buf.pos = 0; 
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE); } /* We've reached the end of the input */ if (workspace->in_buf.pos >= len) { tot_in += workspace->in_buf.pos; break; } /* Check if we need more input */ if (workspace->in_buf.pos == workspace->in_buf.size) { tot_in += workspace->in_buf.size; kunmap_local(workspace->in_buf.src); workspace->in_buf.src = NULL; folio_put(in_folio); start += cur_len; len -= cur_len; ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio); if (ret < 0) goto out; cur_len = btrfs_calc_input_length(in_folio, orig_end, start); workspace->in_buf.src = kmap_local_folio(in_folio, offset_in_folio(in_folio, start)); workspace->in_buf.pos = 0; workspace->in_buf.size = cur_len; } } while (1) { size_t ret2; ret2 = zstd_end_stream(stream, &workspace->out_buf); if (unlikely(zstd_is_error(ret2))) { struct btrfs_inode *inode = BTRFS_I(mapping->host); btrfs_err(inode->root->fs_info, "zstd compression end level %d failed, error %d root %llu inode %llu offset %llu", workspace->req_level, zstd_get_error_code(ret2), btrfs_root_id(inode->root), btrfs_ino(inode), start); ret = -EIO; goto out; } if (ret2 == 0) { tot_out += workspace->out_buf.pos; break; } if (workspace->out_buf.pos >= max_out) { tot_out += workspace->out_buf.pos; ret = -E2BIG; goto out; } tot_out += PAGE_SIZE; max_out -= PAGE_SIZE; if (nr_folios == nr_dest_folios) { ret = -E2BIG; goto out; } out_folio = btrfs_alloc_compr_folio(); if (out_folio == NULL) { ret = -ENOMEM; goto out; } folios[nr_folios++] = out_folio; workspace->out_buf.dst = folio_address(out_folio); workspace->out_buf.pos = 0; workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE); } if (tot_out >= tot_in) { ret = -E2BIG; goto out; } ret = 0; *total_in = tot_in; *total_out = tot_out; out: *out_folios = nr_folios; if (workspace->in_buf.src) { kunmap_local(workspace->in_buf.src); folio_put(in_folio); } return ret; } int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) { struct workspace *workspace = list_entry(ws, struct workspace, list); struct folio **folios_in = cb->compressed_folios; size_t srclen = cb->compressed_len; zstd_dstream *stream; int ret = 0; unsigned long folio_in_index = 0; unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long buf_start; unsigned long total_out = 0; stream = zstd_init_dstream( ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); if (unlikely(!stream)) { struct btrfs_inode *inode = cb->bbio.inode; btrfs_err(inode->root->fs_info, "zstd decompression init failed, root %llu inode %llu offset %llu", btrfs_root_id(inode->root), btrfs_ino(inode), cb->start); ret = -EIO; goto done; } workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0); workspace->in_buf.pos = 0; workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE); workspace->out_buf.dst = workspace->buf; workspace->out_buf.pos = 0; workspace->out_buf.size = PAGE_SIZE; while (1) { size_t ret2; ret2 = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf); if (unlikely(zstd_is_error(ret2))) { struct btrfs_inode *inode = cb->bbio.inode; btrfs_err(inode->root->fs_info, "zstd decompression failed, error %d root %llu inode %llu offset %llu", zstd_get_error_code(ret2), btrfs_root_id(inode->root), btrfs_ino(inode), cb->start); ret = -EIO; goto done; } buf_start = total_out; total_out += workspace->out_buf.pos; workspace->out_buf.pos = 0; ret = btrfs_decompress_buf2page(workspace->out_buf.dst, total_out - buf_start, cb, buf_start); if (ret == 0) break; if 
(workspace->in_buf.pos >= srclen) break; /* Check if we've hit the end of a frame */ if (ret2 == 0) break; if (workspace->in_buf.pos == workspace->in_buf.size) { kunmap_local(workspace->in_buf.src); folio_in_index++; if (folio_in_index >= total_folios_in) { workspace->in_buf.src = NULL; ret = -EIO; goto done; } srclen -= PAGE_SIZE; workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0); workspace->in_buf.pos = 0; workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE); } } ret = 0; done: if (workspace->in_buf.src) kunmap_local(workspace->in_buf.src); return ret; } int zstd_decompress(struct list_head *ws, const u8 *data_in, struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen, size_t destlen) { struct workspace *workspace = list_entry(ws, struct workspace, list); struct btrfs_fs_info *fs_info = btrfs_sb(folio_inode(dest_folio)->i_sb); const u32 sectorsize = fs_info->sectorsize; zstd_dstream *stream; int ret = 0; unsigned long to_copy = 0; stream = zstd_init_dstream( ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); if (unlikely(!stream)) { struct btrfs_inode *inode = folio_to_inode(dest_folio); btrfs_err(inode->root->fs_info, "zstd decompression init failed, root %llu inode %llu offset %llu", btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(dest_folio)); ret = -EIO; goto finish; } workspace->in_buf.src = data_in; workspace->in_buf.pos = 0; workspace->in_buf.size = srclen; workspace->out_buf.dst = workspace->buf; workspace->out_buf.pos = 0; workspace->out_buf.size = sectorsize; /* * Since both input and output buffers should not exceed one sector, * one call should end the decompression. */ ret = zstd_decompress_stream(stream, &workspace->out_buf, &workspace->in_buf); if (unlikely(zstd_is_error(ret))) { struct btrfs_inode *inode = folio_to_inode(dest_folio); btrfs_err(inode->root->fs_info, "zstd decompression failed, error %d root %llu inode %llu offset %llu", zstd_get_error_code(ret), btrfs_root_id(inode->root), btrfs_ino(inode), folio_pos(dest_folio)); goto finish; } to_copy = workspace->out_buf.pos; memcpy_to_folio(dest_folio, dest_pgoff, workspace->out_buf.dst, to_copy); finish: /* Error or early end. */ if (unlikely(to_copy < destlen)) { ret = -EIO; folio_zero_range(dest_folio, dest_pgoff + to_copy, destlen - to_copy); } return ret; } const struct btrfs_compress_op btrfs_zstd_compress = { /* ZSTD uses own workspace manager */ .workspace_manager = NULL, .min_level = ZSTD_BTRFS_MIN_LEVEL, .max_level = ZSTD_BTRFS_MAX_LEVEL, .default_level = ZSTD_BTRFS_DEFAULT_LEVEL, }; |
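A minimal sketch of the get/put pairing described in the workspace-manager comments above; example_with_workspace() is a hypothetical caller and the actual compression arguments are elided.

static int example_with_workspace(int level)
{
	struct list_head *ws;

	/* May sleep until a workspace of sufficient level becomes available. */
	ws = zstd_get_workspace(level);

	/* ... use the workspace, e.g. via zstd_compress_folios(ws, ...) ... */

	/* Returns it to the idle list and, when appropriate, the reclaim lru. */
	zstd_put_workspace(ws);
	return 0;
}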
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * acpi.h - ACPI Interface * * Copyright (C) 2001 Paul Diefenbaugh
<paul.s.diefenbaugh@intel.com> */ #ifndef _LINUX_ACPI_H #define _LINUX_ACPI_H #include <linux/errno.h> #include <linux/ioport.h> /* for struct resource */ #include <linux/resource_ext.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/property.h> #include <linux/uuid.h> #include <linux/node.h> struct irq_domain; struct irq_domain_ops; #ifndef _LINUX #define _LINUX #endif #include <acpi/acpi.h> #include <acpi/acpi_numa.h> #ifdef CONFIG_ACPI #include <linux/list.h> #include <linux/dynamic_debug.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/fw_table.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_io.h> #include <asm/acpi.h> #ifdef CONFIG_ACPI_TABLE_LIB #define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI") #define __init_or_acpilib #define __initdata_or_acpilib #else #define EXPORT_SYMBOL_ACPI_LIB(x) #define __init_or_acpilib __init #define __initdata_or_acpilib __initdata #endif static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return adev ? adev->handle : NULL; } #define ACPI_COMPANION(dev) to_acpi_device_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) #define ACPI_HANDLE_FWNODE(fwnode) \ acpi_device_handle(to_acpi_device_node(fwnode)) static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) { struct fwnode_handle *fwnode; fwnode = kzalloc(sizeof(struct fwnode_handle), GFP_KERNEL); if (!fwnode) return NULL; fwnode_init(fwnode, &acpi_static_fwnode_ops); return fwnode; } static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode) { if (WARN_ON(!is_acpi_static_node(fwnode))) return; kfree(fwnode); } static inline bool has_acpi_companion(struct device *dev) { return is_acpi_device_node(dev->fwnode); } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); } static inline const char *acpi_dev_name(struct acpi_device *adev) { return dev_name(&adev->dev); } struct device *acpi_get_first_physical_node(struct acpi_device *adev); enum acpi_irq_model_id { ACPI_IRQ_MODEL_PIC = 0, ACPI_IRQ_MODEL_IOAPIC, ACPI_IRQ_MODEL_IOSAPIC, ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, ACPI_IRQ_MODEL_LPIC, ACPI_IRQ_MODEL_RINTC, ACPI_IRQ_MODEL_COUNT }; extern enum acpi_irq_model_id acpi_irq_model; enum acpi_interrupt_id { ACPI_INTERRUPT_PMI = 1, ACPI_INTERRUPT_INIT, ACPI_INTERRUPT_CPEI, ACPI_INTERRUPT_COUNT }; #define ACPI_SPACE_MEM 0 enum acpi_address_range_id { ACPI_ADDRESS_RANGE_MEMORY = 1, ACPI_ADDRESS_RANGE_RESERVED = 2, ACPI_ADDRESS_RANGE_ACPI = 3, ACPI_ADDRESS_RANGE_NVS = 4, ACPI_ADDRESS_RANGE_COUNT }; /* Table Handlers */ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); /* Debugger support */ struct acpi_debugger_ops { int (*create_thread)(acpi_osd_exec_callback function, void *context); ssize_t (*write_log)(const char *msg); ssize_t (*read_cmd)(char *buffer, size_t length); int (*wait_command_ready)(bool single_step, char *buffer, size_t length); int (*notify_command_complete)(void); }; struct acpi_debugger { const struct acpi_debugger_ops *ops; struct module *owner; struct mutex lock; }; #ifdef CONFIG_ACPI_DEBUGGER int __init acpi_debugger_init(void); int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops); void acpi_unregister_debugger(const struct 
acpi_debugger_ops *ops); int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context); ssize_t acpi_debugger_write_log(const char *msg); ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length); int acpi_debugger_wait_command_ready(void); int acpi_debugger_notify_command_complete(void); #else static inline int acpi_debugger_init(void) { return -ENODEV; } static inline int acpi_register_debugger(struct module *owner, const struct acpi_debugger_ops *ops) { return -ENODEV; } static inline void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) { } static inline int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) { return -ENODEV; } static inline int acpi_debugger_write_log(const char *msg) { return -ENODEV; } static inline int acpi_debugger_read_cmd(char *buffer, u32 buffer_length) { return -ENODEV; } static inline int acpi_debugger_wait_command_ready(void) { return -ENODEV; } static inline int acpi_debugger_notify_command_complete(void) { return -ENODEV; } #endif #define BAD_MADT_ENTRY(entry, end) ( \ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry)) void __iomem *__acpi_map_table(unsigned long phys, unsigned long size); void __acpi_unmap_table(void __iomem *map, unsigned long size); int early_acpi_boot_init(void); int acpi_boot_init (void); void acpi_boot_table_prepare (void); void acpi_boot_table_init (void); int acpi_mps_check (void); int acpi_numa_init (void); int acpi_locate_initial_tables (void); void acpi_reserve_initial_tables (void); void acpi_table_init_complete (void); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init_or_acpilib acpi_table_parse_entries(char *id, unsigned long table_size, int entry_id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_entries_array(char *id, unsigned long table_size, struct acpi_subtable_proc *proc, int proc_num, unsigned int max_entries); int acpi_table_parse_madt(enum acpi_madt_type id, acpi_tbl_entry_handler handler, unsigned int max_entries); int __init_or_acpilib acpi_table_parse_cedt(enum acpi_cedt_type id, acpi_tbl_entry_handler_arg handler_arg, void *arg); int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); #if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } #endif void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); #if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) void acpi_arch_dma_setup(struct device *dev); #else static inline void acpi_arch_dma_setup(struct device *dev) { } #endif #ifdef CONFIG_ARM64 void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); #else static inline void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } #endif #ifdef CONFIG_RISCV void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa); #else static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { } #endif #ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; #define PHYS_CPUID_INVALID (phys_cpuid_t)(-1) #endif static inline bool invalid_logical_cpuid(u32 cpuid) { return (int)cpuid < 0; } static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) { return phys_id 
== PHYS_CPUID_INVALID; } int __init acpi_get_madt_revision(void); /* Validate the processor object's proc_id */ bool acpi_duplicate_processor_id(int proc_id); /* Processor _CTS control */ struct acpi_processor_power; #ifdef CONFIG_ACPI_PROCESSOR_CSTATE bool acpi_processor_claim_cst_control(void); int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info); #else static inline bool acpi_processor_claim_cst_control(void) { return false; } static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu, struct acpi_processor_power *info) { return -ENODEV; } #endif #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu); int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ acpi_handle acpi_get_processor_handle(int cpu); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); #endif int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base); void acpi_irq_stats_init(void); extern u32 acpi_irq_handled; extern u32 acpi_irq_not_handled; extern unsigned int acpi_sci_irq; extern bool acpi_no_s5; #define INVALID_ACPI_IRQ ((unsigned)-1) static inline bool acpi_sci_irq_valid(void) { return acpi_sci_irq != INVALID_ACPI_IRQ; } extern int sbf_port; int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi); typedef struct fwnode_handle *(*acpi_gsi_domain_disp_fn)(u32); void acpi_set_irq_model(enum acpi_irq_model_id model, acpi_gsi_domain_disp_fn fn); acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void); void acpi_set_gsi_to_irq_fallback(u32 (*)(u32)); struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags, unsigned int size, struct fwnode_handle *fwnode, const struct irq_domain_ops *ops, void *host_data); #ifdef CONFIG_X86_IO_APIC extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); #else static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) { return -1; } #endif /* * This function undoes the effect of one call to acpi_register_gsi(). * If this matches the last registration, any IRQ resources for gsi * are freed. 
*/ void acpi_unregister_gsi (u32 gsi); struct pci_dev; struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin); int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); #ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); #else static inline void acpi_penalize_sci_irq(int irq, int trigger, int polarity) { } #endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); extern int ec_write(u8 addr, u8 val); extern int ec_transaction(u8 command, const u8 *wdata, unsigned wdata_len, u8 *rdata, unsigned rdata_len); extern acpi_handle ec_get_handle(void); extern bool acpi_is_pnp_device(struct acpi_device *); #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) typedef void (*wmi_notify_handler) (union acpi_object *data, void *context); int wmi_instance_count(const char *guid); extern acpi_status wmi_evaluate_method(const char *guid, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out); extern acpi_status wmi_query_block(const char *guid, u8 instance, struct acpi_buffer *out); extern acpi_status wmi_set_block(const char *guid, u8 instance, const struct acpi_buffer *in); extern acpi_status wmi_install_notify_handler(const char *guid, wmi_notify_handler handler, void *data); extern acpi_status wmi_remove_notify_handler(const char *guid); extern bool wmi_has_guid(const char *guid); extern char *wmi_get_acpi_device_uid(const char *guid); #endif /* CONFIG_ACPI_WMI */ #define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001 #define ACPI_VIDEO_DEVICE_POSTING 0x0002 #define ACPI_VIDEO_ROM_AVAILABLE 0x0004 #define ACPI_VIDEO_BACKLIGHT 0x0008 #define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010 #define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040 #define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080 #define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800 extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); #ifdef CONFIG_ACPI_THERMAL_LIB int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp); int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp); int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp); #endif #ifdef CONFIG_ACPI_HMAT int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord); #else static inline int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA int acpi_map_pxm_to_node(int pxm); int acpi_get_node(acpi_handle handle); /** * pxm_to_online_node - Map proximity ID to online node * @pxm: ACPI proximity ID * * This is similar to pxm_to_node(), but always returns an online * node. When the mapped node from a given proximity ID is offline, it * looks up the node distance table and returns the nearest online node. * * ACPI device drivers, which are called after the NUMA initialization has * completed in the kernel, can call this interface to obtain their device * NUMA topology from ACPI tables. Such drivers do not have to deal with * offline nodes. 
A node may be offline when SRAT memory entry does not exist, * or NUMA is disabled, ex. "numa=off" on x86. */ static inline int pxm_to_online_node(int pxm) { int node = pxm_to_node(pxm); return numa_map_to_online_node(node); } #else static inline int pxm_to_online_node(int pxm) { return 0; } static inline int acpi_map_pxm_to_node(int pxm) { return 0; } static inline int acpi_get_node(acpi_handle handle) { return 0; } #endif extern int pnpacpi_disabled; #define PXM_INVAL (-1) bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res); bool acpi_dev_resource_address_space(struct acpi_resource *ares, struct resource_win *win); bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, struct resource_win *win); unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable); unsigned int acpi_dev_get_irq_type(int triggering, int polarity); bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, struct resource *res); void acpi_dev_free_resource_list(struct list_head *list); int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, int (*preproc)(struct acpi_resource *, void *), void *preproc_data); int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list); int acpi_dev_filter_resource_type(struct acpi_resource *ares, unsigned long types); static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares, void *arg) { return acpi_dev_filter_resource_type(ares, (unsigned long)arg); } struct acpi_device *acpi_resource_consumer(struct resource *res); int acpi_check_resource_conflict(const struct resource *res); int acpi_check_region(resource_size_t start, resource_size_t n, const char *name); int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION extern int acpi_check_s4_hw_signature; #endif #ifdef CONFIG_PM_SLEEP void __init acpi_old_suspend_ordering(void); void __init acpi_nvs_nosave(void); void __init acpi_nvs_nosave_s3(void); void __init acpi_sleep_no_blacklist(void); #endif /* CONFIG_PM_SLEEP */ int acpi_register_wakeup_handler( int wake_irq, bool (*wakeup)(void *context), void *context); void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context); struct acpi_osc_context { char *uuid_str; /* UUID string */ int rev; struct acpi_buffer cap; /* list of DWORD capabilities */ struct acpi_buffer ret; /* free by caller if success */ }; acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); /* Number of _OSC capability DWORDS depends on bridge type */ #define OSC_PCI_CAPABILITY_DWORDS 3 #define OSC_CXL_CAPABILITY_DWORDS 5 /* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */ #define OSC_QUERY_DWORD 0 /* DWORD 1 */ #define OSC_SUPPORT_DWORD 1 /* DWORD 2 */ #define OSC_CONTROL_DWORD 2 /* DWORD 3 */ #define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */ #define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */ /* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */ #define OSC_QUERY_ENABLE 0x00000001 /* input */ #define OSC_REQUEST_ERROR 0x00000002 /* return */ #define OSC_INVALID_UUID_ERROR 0x00000004 /* return */ #define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */ #define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */ /* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */ #define OSC_SB_PAD_SUPPORT 
0x00000001 #define OSC_SB_PPC_OST_SUPPORT 0x00000002 #define OSC_SB_PR3_SUPPORT 0x00000004 #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 #define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000 #define OSC_SB_PRM_SUPPORT 0x00200000 #define OSC_SB_FFH_OPR_SUPPORT 0x00400000 extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; extern bool osc_sb_cppc2_support_acked; extern bool osc_cpc_flexible_adr_space_confirmed; /* USB4 Capabilities */ #define OSC_USB_USB3_TUNNELING 0x00000001 #define OSC_USB_DP_TUNNELING 0x00000002 #define OSC_USB_PCIE_TUNNELING 0x00000004 #define OSC_USB_XDOMAIN 0x00000008 extern u32 osc_sb_native_usb4_control; /* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 #define OSC_PCI_ASPM_SUPPORT 0x00000002 #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 #define OSC_PCI_MSI_SUPPORT 0x00000010 #define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 #define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002 #define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004 #define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008 #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 #define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 /* CXL _OSC: Capabilities DWORD 4: Support Field */ #define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001 #define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002 #define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004 #define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008 /* CXL _OSC: Capabilities DWORD 5: Control Field */ #define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001 static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_CONTROL_DWORD]; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { u32 *ret = context->ret.pointer; return ret[OSC_EXT_CONTROL_DWORD]; } #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 #define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006 #define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008 #define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A #define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B #define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C #define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D #define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E #define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F /* Enable _OST when all relevant hotplug operations are enabled */ #if defined(CONFIG_ACPI_HOTPLUG_CPU) && \ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \ defined(CONFIG_ACPI_CONTAINER) #define ACPI_HOTPLUG_OST #endif /* _OST Source Event Code (OSPM Action) */ #define 
ACPI_OST_EC_OSPM_SHUTDOWN 0x100 #define ACPI_OST_EC_OSPM_EJECT 0x103 #define ACPI_OST_EC_OSPM_INSERTION 0x200 /* _OST General Processing Status Code */ #define ACPI_OST_SC_SUCCESS 0x0 #define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1 #define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2 /* _OST OS Shutdown Processing (0x100) Status Code */ #define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80 #define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81 #define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82 #define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83 /* _OST Ejection Request (0x3, 0x103) Status Code */ #define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80 #define ACPI_OST_SC_DEVICE_IN_USE 0x81 #define ACPI_OST_SC_DEVICE_BUSY 0x82 #define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83 #define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84 /* _OST Insertion Request (0x200) Status Code */ #define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80 #define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81 #define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82 enum acpi_predicate { all_versions, less_than_or_equal, equal, greater_than_or_equal, }; /* Table must be terminted by a NULL entry */ struct acpi_platform_list { char oem_id[ACPI_OEM_ID_SIZE+1]; char oem_table_id[ACPI_OEM_TABLE_ID_SIZE+1]; u32 oem_revision; char *table; enum acpi_predicate pred; char *reason; u32 data; }; int acpi_match_platform_list(const struct acpi_platform_list *plat); extern void acpi_early_init(void); extern void acpi_subsystem_init(void); extern int acpi_nvs_register(__u64 start, __u64 size); extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data); const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids, const struct acpi_device *adev); const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, const struct device *dev); const void *acpi_device_get_match_data(const struct device *dev); extern bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv); int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); struct platform_device *acpi_create_platform_device(struct acpi_device *, const struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { adev->flags.visited = true; } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { adev->flags.visited = false; } enum acpi_reconfig_event { ACPI_RECONFIG_DEVICE_ADD = 0, ACPI_RECONFIG_DEVICE_REMOVE, }; int acpi_reconfig_notifier_register(struct notifier_block *nb); int acpi_reconfig_notifier_unregister(struct notifier_block *nb); #ifdef CONFIG_ACPI_GTDT int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count); int acpi_gtdt_map_ppi(int type); bool acpi_gtdt_c3stop(int type); int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count); #endif #ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER static __always_inline void acpi_arch_set_root_pointer(u64 addr) { } #endif #ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER static __always_inline u64 acpi_arch_get_root_pointer(void) { return 0; } #endif int acpi_get_local_u64_address(acpi_handle handle, u64 *addr); int acpi_get_local_address(acpi_handle handle, u32 *addr); const char *acpi_get_subsystem_id(acpi_handle handle); #ifdef CONFIG_ACPI_MRRM int acpi_mrrm_max_mem_region(void); #endif #else /* !CONFIG_ACPI */ #define acpi_disabled 1 #define ACPI_COMPANION(dev) (NULL) #define ACPI_COMPANION_SET(dev, adev) do { } while (0) #define 
ACPI_HANDLE(dev) (NULL) #define ACPI_HANDLE_FWNODE(fwnode) (NULL) /* Get rid of the -Wunused-variable for adev */ #define acpi_dev_uid_match(adev, uid2) (adev && false) #define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false) struct fwnode_handle; static inline bool acpi_dev_found(const char *hid) { return false; } static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv) { return false; } struct acpi_device; static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer) { return -ENODEV; } static inline struct acpi_device * acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv) { return NULL; } static inline bool acpi_reduced_hardware(void) { return false; } static inline void acpi_dev_put(struct acpi_device *adev) {} static inline bool is_acpi_node(const struct fwnode_handle *fwnode) { return false; } static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode) { return false; } static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; } static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; } static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev) { return NULL; } static inline acpi_handle acpi_device_handle(struct acpi_device *adev) { return NULL; } static inline bool has_acpi_companion(struct device *dev) { return false; } static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { } static inline const char *acpi_dev_name(struct acpi_device *adev) { return NULL; } static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev) { return NULL; } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { } static inline int early_acpi_boot_init(void) { return 0; } static inline int acpi_boot_init(void) { return 0; } static inline void acpi_boot_table_prepare(void) { } static inline void acpi_boot_table_init(void) { } static inline int acpi_mps_check(void) { return 0; } static inline int acpi_check_resource_conflict(struct resource *res) { return 0; } static inline int acpi_check_region(resource_size_t start, resource_size_t n, const char *name) { return 0; } struct acpi_table_header; static inline int acpi_table_parse(char *id, int (*handler)(struct acpi_table_header *)) { return -ENODEV; } static inline int acpi_nvs_register(__u64 start, __u64 size) { return 0; } static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), void *data) { return 0; } struct acpi_device_id; static inline const struct acpi_device_id *acpi_match_acpi_device( const struct acpi_device_id *ids, const struct acpi_device *adev) { return NULL; } static inline const struct acpi_device_id *acpi_match_device( const struct acpi_device_id *ids, const struct device *dev) { return NULL; } static inline const void *acpi_device_get_match_data(const struct device *dev) { return NULL; } static inline bool acpi_driver_match_device(struct device *dev, const struct device_driver *drv) { return false; } static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs) { return false; } static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, 
union acpi_object *argv4) { return NULL; } static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4, acpi_object_type type) { return NULL; } static inline int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } static inline int acpi_device_modalias(struct device *dev, char *buf, int size) { return -ENODEV; } static inline struct platform_device * acpi_create_platform_device(struct acpi_device *adev, const struct property_entry *properties) { return NULL; } static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev) { return DEV_DMA_NOT_SUPPORTED; } static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) { return -ENODEV; } static inline int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr) { return 0; } static inline int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, const u32 *input_id) { return 0; } #define ACPI_PTR(_ptr) (NULL) static inline void acpi_device_set_enumerated(struct acpi_device *adev) { } static inline void acpi_device_clear_enumerated(struct acpi_device *adev) { } static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) { return -EINVAL; } static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) { return -EINVAL; } static inline struct acpi_device *acpi_resource_consumer(struct resource *res) { return NULL; } static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) { return -ENODEV; } static inline const char *acpi_get_subsystem_id(acpi_handle handle) { return ERR_PTR(-ENODEV); } static inline int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), void *context) { return -ENXIO; } static inline void acpi_unregister_wakeup_handler( bool (*wakeup)(void *context), void *context) { } struct acpi_osc_context; static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context) { return 0; } static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context) { return 0; } static inline bool acpi_sleep_state_supported(u8 sleep_state) { return false; } static inline acpi_handle acpi_get_processor_handle(int cpu) { return NULL; } static inline int acpi_mrrm_max_mem_region(void) { return 1; } #endif /* !CONFIG_ACPI */ #ifdef CONFIG_ACPI_HMAT int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid, resource_size_t *size); #else static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid, resource_size_t *size) { return -EOPNOTSUPP; } #endif extern void arch_post_acpi_subsys_init(void); #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC int acpi_ioapic_add(acpi_handle root); #else static inline int acpi_ioapic_add(acpi_handle root) { return 0; } #endif #ifdef CONFIG_ACPI void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)); acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control); void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, u32 val_a, u32 val_b)); acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); struct acpi_s2idle_dev_ops { struct list_head list_node; void (*prepare)(void); void (*check)(void); void (*restore)(void); }; #if defined(CONFIG_SUSPEND) && defined(CONFIG_X86) int acpi_register_lps0_dev(struct 
acpi_s2idle_dev_ops *arg); void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); int acpi_get_lps0_constraint(struct acpi_device *adev); #else /* CONFIG_SUSPEND && CONFIG_X86 */ static inline int acpi_get_lps0_constraint(struct device *dev) { return ACPI_STATE_UNKNOWN; } static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg) { return -ENODEV; } static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg) { } #endif /* CONFIG_SUSPEND && CONFIG_X86 */ void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else #define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM) int acpi_dev_suspend(struct device *dev, bool wakeup); int acpi_dev_resume(struct device *dev); int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); bool acpi_storage_d3(struct device *dev); bool acpi_dev_state_d0(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } static inline int acpi_dev_pm_attach(struct device *dev, bool power_on) { return 0; } static inline bool acpi_storage_d3(struct device *dev) { return false; } static inline bool acpi_dev_state_d0(struct device *dev) { return true; } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) int acpi_subsys_prepare(struct device *dev); void acpi_subsys_complete(struct device *dev); int acpi_subsys_suspend_late(struct device *dev); int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); int acpi_subsys_restore_early(struct device *dev); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } static inline void acpi_subsys_complete(struct device *dev) {} static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; } static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; } static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } static inline int acpi_subsys_restore_early(struct device *dev) { return 0; } #endif #if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP) void acpi_ec_mark_gpe_for_wake(void); void acpi_ec_set_gpe_wake_mask(u8 action); #else static inline void acpi_ec_mark_gpe_for_wake(void) {} static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif #ifdef CONFIG_ACPI char *acpi_handle_path(acpi_handle handle); __printf(3, 4) void acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...); void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status); #else /* !CONFIG_ACPI */ static inline __printf(3, 4) void acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) 
{} static inline void acpi_evaluation_failure_warn(acpi_handle handle, const char *name, acpi_status status) {} #endif /* !CONFIG_ACPI */ #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) __printf(3, 4) void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); #endif /* * acpi_handle_<level>: Print message with ACPI prefix and object path * * These interfaces acquire the global namespace mutex to obtain an object * path. In interrupt context, it shows the object path as <n/a>. */ #define acpi_handle_emerg(handle, fmt, ...) \ acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__) #define acpi_handle_alert(handle, fmt, ...) \ acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_crit(handle, fmt, ...) \ acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__) #define acpi_handle_err(handle, fmt, ...) \ acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__) #define acpi_handle_warn(handle, fmt, ...) \ acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__) #define acpi_handle_notice(handle, fmt, ...) \ acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__) #define acpi_handle_info(handle, fmt, ...) \ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__) #if defined(DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__) #else #if defined(CONFIG_DYNAMIC_DEBUG) #define acpi_handle_debug(handle, fmt, ...) \ _dynamic_func_call(fmt, __acpi_handle_debug, \ handle, pr_fmt(fmt), ##__VA_ARGS__) #else #define acpi_handle_debug(handle, fmt, ...) \ ({ \ if (0) \ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \ 0; \ }) #endif #endif #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio) { return false; } static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { return -ENXIO; } #endif static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index, bool *wake_capable) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable); } static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL); } static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) { return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL); } /* Device properties */ #ifdef CONFIG_ACPI int acpi_dev_get_property(const struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj); int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args); static inline int acpi_node_get_property_reference( const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return __acpi_node_get_property_reference(fwnode, name, index, NR_FWNODE_REFERENCE_ARGS, args); } 
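/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * helper combining the device-property and handle-prefixed logging interfaces
 * declared above.  The property name "vendor,queue-count" and the function
 * itself are made-up examples.
 */
static inline int example_read_queue_count(struct acpi_device *adev)
{
	const union acpi_object *obj;
	int ret;

	ret = acpi_dev_get_property(adev, "vendor,queue-count",
				    ACPI_TYPE_INTEGER, &obj);
	if (ret) {
		acpi_handle_warn(adev->handle,
				 "missing vendor,queue-count property\n");
		return ret;
	}

	acpi_handle_info(adev->handle, "queue count: %llu\n",
			 obj->integer.value);
	return 0;
}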
static inline bool acpi_dev_has_props(const struct acpi_device *adev) { return !list_empty(&adev->data.properties); } struct acpi_device_properties * acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, union acpi_object *properties); int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct acpi_probe_entry; typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *); #define ACPI_TABLE_ID_LEN 5 /** * struct acpi_probe_entry - boot-time probing entry * @id: ACPI table name * @type: Optional subtable type to match * (if @id contains subtables) * @subtable_valid: Optional callback to check the validity of * the subtable * @probe_table: Callback to the driver being probed when table * match is successful * @probe_subtbl: Callback to the driver being probed when table and * subtable match (and optional callback is successful) * @driver_data: Sideband data provided back to the driver */ struct acpi_probe_entry { __u8 id[ACPI_TABLE_ID_LEN]; __u8 type; acpi_probe_entry_validate_subtbl subtable_valid; union { acpi_tbl_table_handler probe_table; acpi_tbl_entry_handler probe_subtbl; }; kernel_ulong_t driver_data; }; void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_table = fn, \ .driver_data = data, \ } #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ subtable, valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ __used __section("__" #table "_acpi_probe_table") = { \ .id = table_id, \ .type = subtable, \ .subtable_valid = valid, \ .probe_subtbl = fn, \ .driver_data = data, \ } #define ACPI_PROBE_TABLE(name) __##name##_acpi_probe_table #define ACPI_PROBE_TABLE_END(name) __##name##_acpi_probe_table_end int __acpi_probe_device_table(struct acpi_probe_entry *start, int nr); #define acpi_probe_device_table(t) \ ({ \ extern struct acpi_probe_entry ACPI_PROBE_TABLE(t), \ ACPI_PROBE_TABLE_END(t); \ __acpi_probe_device_table(&ACPI_PROBE_TABLE(t), \ (&ACPI_PROBE_TABLE_END(t) - \ &ACPI_PROBE_TABLE(t))); \ }) #else static inline int acpi_dev_get_property(struct acpi_device *adev, const char *name, acpi_object_type type, const union acpi_object **obj) { return -ENXIO; } static inline int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, size_t num_args, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_get_property_reference(const struct fwnode_handle *fwnode, const char *name, size_t index, struct fwnode_reference_args *args) { return -ENXIO; } static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr) { return -ENXIO; } static inline struct fwnode_handle * acpi_get_next_subnode(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { return NULL; } static inline struct fwnode_handle * acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { return ERR_PTR(-ENXIO); } static inline int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle 
**remote, struct fwnode_handle **port, struct fwnode_handle **endpoint) { return -ENXIO; } #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ static const void * __acpi_table_##name[] \ __attribute__((unused)) \ = { (void *) table_id, \ (void *) subtable, \ (void *) valid, \ (void *) fn, \ (void *) data } #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif #ifdef CONFIG_ACPI_TABLE_UPGRADE void acpi_table_upgrade(void); #else static inline void acpi_table_upgrade(void) { } #endif #if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG) extern bool acpi_has_watchdog(void); #else static inline bool acpi_has_watchdog(void) { return false; } #endif #ifdef CONFIG_ACPI_SPCR_TABLE extern bool qdf2400_e44_present; int acpi_parse_spcr(bool enable_earlycon, bool enable_console); #else static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console) { return -ENODEV; } #endif #if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI) int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res); #else static inline int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_LPIT int lpit_read_residency_count_address(u64 *address); #else static inline int lpit_read_residency_count_address(u64 *address) { return -EINVAL; } #endif #ifdef CONFIG_ACPI_PROCESSOR_IDLE #ifndef arch_get_idle_state_flags static inline unsigned int arch_get_idle_state_flags(u32 arch_flags) { return 0; } #endif #endif /* CONFIG_ACPI_PROCESSOR_IDLE */ #ifdef CONFIG_ACPI_PPTT int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; } static inline int find_acpi_cpu_topology_cluster(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_package(unsigned int cpu) { return -EINVAL; } static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } #endif void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); #else static inline void acpi_init_pcc(void) { } #endif #ifdef CONFIG_ACPI_FFH void acpi_init_ffh(void); extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt); extern int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context); #else static inline void acpi_init_ffh(void) { } #endif #ifdef CONFIG_ACPI extern void acpi_device_notify(struct device *dev); extern void acpi_device_notify_remove(struct device *dev); #else static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif static inline void acpi_use_parent_companion(struct device *dev) { ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent)); } #ifdef CONFIG_ACPI_HMAT int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access); #else static inline int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access) { return -EOPNOTSUPP; } #endif #ifdef CONFIG_ACPI_NUMA bool acpi_node_backed_by_real_pxm(int nid); #else static inline bool acpi_node_backed_by_real_pxm(int nid) { return 
false; } #endif #endif /*_LINUX_ACPI_H*/
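/*
 * Illustrative sketch only, appended here for clarity rather than being part
 * of the header: a hypothetical _OSC query built from the capability DWORD
 * layout and acpi_run_osc() declared above.  The UUID string and the chosen
 * support bit are assumed example values.
 */
static acpi_status example_query_osc(acpi_handle handle)
{
	u32 capbuf[OSC_SUPPORT_DWORD + 1] = { };
	struct acpi_osc_context context = {
		.uuid_str = "0811b06e-4a27-44f9-8d60-3cbbc22e7b48", /* example UUID */
		.rev = 1,
		.cap.length = sizeof(capbuf),
		.cap.pointer = capbuf,
	};
	acpi_status status;

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = OSC_SB_CPC_SUPPORT;

	status = acpi_run_osc(handle, &context);
	if (ACPI_SUCCESS(status))
		kfree(context.ret.pointer);	/* "ret" is freed by the caller on success */
	return status;
}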
| 2 2 2 2 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 | // SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/atomic.h> #include <linux/inetdevice.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_nat_masquerade.h> struct masq_dev_work { struct work_struct work; struct net *net; netns_tracker ns_tracker; union nf_inet_addr addr; int ifindex; int (*iter)(struct nf_conn *i, void *data); }; #define MAX_MASQ_WORKER_COUNT 16 static DEFINE_MUTEX(masq_mutex); static unsigned int masq_refcnt __read_mostly; static atomic_t masq_worker_count __read_mostly; unsigned int nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, const struct nf_nat_range2 *range, const struct net_device *out) { struct nf_conn *ct; struct nf_conn_nat *nat; enum ip_conntrack_info ctinfo; struct nf_nat_range2 newrange; const struct rtable *rt; __be32 newsrc, nh; WARN_ON(hooknum != NF_INET_POST_ROUTING); ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); /* Source address is 0.0.0.0 - locally generated packet that is * probably not supposed to be masqueraded. */ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) return NF_ACCEPT; rt = skb_rtable(skb); nh = rt_nexthop(rt, ip_hdr(skb)->daddr); newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); if (!newsrc) { pr_info("%s ate my IP address\n", out->name); return NF_DROP; } nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; /* Transfer from original range. */ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.ip = newsrc; newrange.max_addr.ip = newsrc; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; /* Hand modified range to generic setup. 
*/ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); static void iterate_cleanup_work(struct work_struct *work) { struct nf_ct_iter_data iter_data = {}; struct masq_dev_work *w; w = container_of(work, struct masq_dev_work, work); iter_data.net = w->net; iter_data.data = (void *)w; nf_ct_iterate_cleanup_net(w->iter, &iter_data); put_net_track(w->net, &w->ns_tracker); kfree(w); atomic_dec(&masq_worker_count); module_put(THIS_MODULE); } /* Iterate conntrack table in the background and remove conntrack entries * that use the device/address being removed. * * In case too many work items have been queued already or memory allocation * fails iteration is skipped, conntrack entries will time out eventually. */ static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr, int ifindex, int (*iter)(struct nf_conn *i, void *data), gfp_t gfp_flags) { struct masq_dev_work *w; if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT) return; net = maybe_get_net(net); if (!net) return; if (!try_module_get(THIS_MODULE)) goto err_module; w = kzalloc(sizeof(*w), gfp_flags); if (w) { /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */ atomic_inc(&masq_worker_count); INIT_WORK(&w->work, iterate_cleanup_work); w->ifindex = ifindex; w->net = net; netns_tracker_alloc(net, &w->ns_tracker, gfp_flags); w->iter = iter; if (addr) w->addr = *addr; schedule_work(&w->work); return; } module_put(THIS_MODULE); err_module: put_net(net); } static int device_cmp(struct nf_conn *i, void *arg) { const struct nf_conn_nat *nat = nfct_nat(i); const struct masq_dev_work *w = arg; if (!nat) return 0; return nat->masq_index == w->ifindex; } static int masq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_DOWN) { /* Device was downed. Search entire table for * conntracks which were associated with that device, * and forget them. */ nf_nat_masq_schedule(net, NULL, dev->ifindex, device_cmp, GFP_KERNEL); } return NOTIFY_DONE; } static int inet_cmp(struct nf_conn *ct, void *ptr) { struct nf_conntrack_tuple *tuple; struct masq_dev_work *w = ptr; if (!device_cmp(ct, ptr)) return 0; tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3); } static int masq_inet_event(struct notifier_block *this, unsigned long event, void *ptr) { const struct in_ifaddr *ifa = ptr; const struct in_device *idev; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; /* The masq_dev_notifier will catch the case of the device going * down. So if the inetdev is dead and being destroyed we have * no work to do. Otherwise this is an individual address removal * and we have to perform the flush. 
*/ idev = ifa->ifa_dev; if (idev->dead) return NOTIFY_DONE; memset(&addr, 0, sizeof(addr)); addr.ip = ifa->ifa_address; dev = idev->dev; nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex, inet_cmp, GFP_KERNEL); return NOTIFY_DONE; } static struct notifier_block masq_dev_notifier = { .notifier_call = masq_device_event, }; static struct notifier_block masq_inet_notifier = { .notifier_call = masq_inet_event, }; #if IS_ENABLED(CONFIG_IPV6) static int nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr) { #ifdef CONFIG_IPV6_MODULE const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->dev_get_saddr(net, dev, daddr, srcprefs, saddr); #else return ipv6_dev_get_saddr(net, dev, daddr, srcprefs, saddr); #endif } unsigned int nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, const struct net_device *out) { enum ip_conntrack_info ctinfo; struct nf_conn_nat *nat; struct in6_addr src; struct nf_conn *ct; struct nf_nat_range2 newrange; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); if (nat_ipv6_dev_get_saddr(nf_ct_net(ct), out, &ipv6_hdr(skb)->daddr, 0, &src) < 0) return NF_DROP; nat = nf_ct_nat_ext_add(ct); if (nat) nat->masq_index = out->ifindex; newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; newrange.min_addr.in6 = src; newrange.max_addr.in6 = src; newrange.min_proto = range->min_proto; newrange.max_proto = range->max_proto; return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep). * * Defer it to the system workqueue. * * As we can have 'a lot' of inet_events (depending on amount of ipv6 * addresses being deleted), we also need to limit work item queue. 
*/ static int masq_inet6_event(struct notifier_block *this, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = ptr; const struct net_device *dev; union nf_inet_addr addr; if (event != NETDEV_DOWN) return NOTIFY_DONE; dev = ifa->idev->dev; memset(&addr, 0, sizeof(addr)); addr.in6 = ifa->addr; nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp, GFP_ATOMIC); return NOTIFY_DONE; } static struct notifier_block masq_inet6_notifier = { .notifier_call = masq_inet6_event, }; static int nf_nat_masquerade_ipv6_register_notifier(void) { return register_inet6addr_notifier(&masq_inet6_notifier); } #else static inline int nf_nat_masquerade_ipv6_register_notifier(void) { return 0; } #endif int nf_nat_masquerade_inet_register_notifiers(void) { int ret = 0; mutex_lock(&masq_mutex); if (WARN_ON_ONCE(masq_refcnt == UINT_MAX)) { ret = -EOVERFLOW; goto out_unlock; } /* check if the notifier was already set */ if (++masq_refcnt > 1) goto out_unlock; /* Register for device down reports */ ret = register_netdevice_notifier(&masq_dev_notifier); if (ret) goto err_dec; /* Register IP address change reports */ ret = register_inetaddr_notifier(&masq_inet_notifier); if (ret) goto err_unregister; ret = nf_nat_masquerade_ipv6_register_notifier(); if (ret) goto err_unreg_inet; mutex_unlock(&masq_mutex); return ret; err_unreg_inet: unregister_inetaddr_notifier(&masq_inet_notifier); err_unregister: unregister_netdevice_notifier(&masq_dev_notifier); err_dec: masq_refcnt--; out_unlock: mutex_unlock(&masq_mutex); return ret; } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_register_notifiers); void nf_nat_masquerade_inet_unregister_notifiers(void) { mutex_lock(&masq_mutex); /* check if the notifiers still have clients */ if (--masq_refcnt > 0) goto out_unlock; unregister_netdevice_notifier(&masq_dev_notifier); unregister_inetaddr_notifier(&masq_inet_notifier); #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&masq_inet6_notifier); #endif out_unlock: mutex_unlock(&masq_mutex); } EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet_unregister_notifiers); |
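/*
 * Illustrative sketch only, not part of this file: roughly how a NAT target
 * running in the POST_ROUTING hook would hand a packet to the IPv4 masquerade
 * helper above.  The hook/target plumbing is omitted and the zeroed range is
 * an assumed minimal example (no port remapping requested).
 */
static unsigned int example_masq_hook(struct sk_buff *skb,
				      const struct net_device *out)
{
	struct nf_nat_range2 range = { .flags = 0 };

	return nf_nat_masquerade_ipv4(skb, NF_INET_POST_ROUTING, &range, out);
}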
| 5801 5788 172 2 343 3 8 282 46 12 349 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 | /** * css_get - obtain a reference on the specified css * @css: target css * * The caller must already have a reference. */ CGROUP_REF_FN_ATTRS void css_get(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_NO_REF)) percpu_ref_get(&css->refcnt); } CGROUP_REF_EXPORT(css_get) /** * css_get_many - obtain references on the specified css * @css: target css * @n: number of references to get * * The caller must already have a reference. */ CGROUP_REF_FN_ATTRS void css_get_many(struct cgroup_subsys_state *css, unsigned int n) { if (!(css->flags & CSS_NO_REF)) percpu_ref_get_many(&css->refcnt, n); } CGROUP_REF_EXPORT(css_get_many) /** * css_tryget - try to obtain a reference on the specified css * @css: target css * * Obtain a reference on @css unless it already has reached zero and is * being released. This function doesn't care whether @css is on or * offline. The caller naturally needs to ensure that @css is accessible * but doesn't have to be holding a reference on it - IOW, RCU protected * access is good enough for this function. Returns %true if a reference * count was successfully obtained; %false otherwise. */ CGROUP_REF_FN_ATTRS bool css_tryget(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_NO_REF)) return percpu_ref_tryget(&css->refcnt); return true; } CGROUP_REF_EXPORT(css_tryget) /** * css_tryget_online - try to obtain a reference on the specified css if online * @css: target css * * Obtain a reference on @css if it's online. The caller naturally needs * to ensure that @css is accessible but doesn't have to be holding a * reference on it - IOW, RCU protected access is good enough for this * function. Returns %true if a reference count was successfully obtained; * %false otherwise. */ CGROUP_REF_FN_ATTRS bool css_tryget_online(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_NO_REF)) return percpu_ref_tryget_live(&css->refcnt); return true; } CGROUP_REF_EXPORT(css_tryget_online) /** * css_put - put a css reference * @css: target css * * Put a reference obtained via css_get() and css_tryget_online(). */ CGROUP_REF_FN_ATTRS void css_put(struct cgroup_subsys_state *css) { if (!(css->flags & CSS_NO_REF)) percpu_ref_put(&css->refcnt); } CGROUP_REF_EXPORT(css_put) /** * css_put_many - put css references * @css: target css * @n: number of references to put * * Put references obtained via css_get() and css_tryget_online(). */ CGROUP_REF_FN_ATTRS void css_put_many(struct cgroup_subsys_state *css, unsigned int n) { if (!(css->flags & CSS_NO_REF)) percpu_ref_put_many(&css->refcnt, n); } CGROUP_REF_EXPORT(css_put_many) |
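/*
 * Illustrative sketch only, not part of this file: the usual calling pattern
 * for the helpers above - find a css under RCU, pin it with
 * css_tryget_online(), and release it later with css_put().
 * example_lookup_css() is a hypothetical stand-in for whatever RCU-protected
 * lookup the caller performs.
 */
struct cgroup_subsys_state *example_lookup_css(void);	/* hypothetical */

static struct cgroup_subsys_state *example_get_online_css(void)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = example_lookup_css();
	if (css && !css_tryget_online(css))
		css = NULL;		/* css already on its way out */
	rcu_read_unlock();

	return css;			/* caller drops it with css_put() */
}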
| 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 | // SPDX-License-Identifier: GPL-2.0 #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/xarray.h> #include <net/busy_poll.h> #include <net/net_debug.h> #include <net/netdev_rx_queue.h> #include <net/page_pool/helpers.h> #include <net/page_pool/types.h> #include <net/page_pool/memory_provider.h> #include <net/sock.h> #include "page_pool_priv.h" #include "netdev-genl-gen.h" static DEFINE_XARRAY_FLAGS(page_pools, XA_FLAGS_ALLOC1); /* Protects: page_pools, netdevice->page_pools, pool->p.napi, pool->slow.netdev, * pool->user. * Ordering: inside rtnl_lock */ DEFINE_MUTEX(page_pools_lock); /* Page pools are only reachable from user space (via netlink) if they are * linked to a netdev at creation time. 
Following page pool "visibility" * states are possible: * - normal * - user.list: linked to real netdev, netdev: real netdev * - orphaned - real netdev has disappeared * - user.list: linked to lo, netdev: lo * - invisible - either (a) created without netdev linking, (b) unlisted due * to error, or (c) the entire namespace which owned this pool disappeared * - user.list: unhashed, netdev: unknown */ typedef int (*pp_nl_fill_cb)(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info); static int netdev_nl_page_pool_get_do(struct genl_info *info, u32 id, pp_nl_fill_cb fill) { struct page_pool *pool; struct sk_buff *rsp; int err; mutex_lock(&page_pools_lock); pool = xa_load(&page_pools, id); if (!pool || hlist_unhashed(&pool->user.list) || !net_eq(dev_net(pool->slow.netdev), genl_info_net(info))) { err = -ENOENT; goto err_unlock; } rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) { err = -ENOMEM; goto err_unlock; } err = fill(rsp, pool, info); if (err) goto err_free_msg; mutex_unlock(&page_pools_lock); return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); err_unlock: mutex_unlock(&page_pools_lock); return err; } struct page_pool_dump_cb { unsigned long ifindex; u32 pp_id; }; static int netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb, pp_nl_fill_cb fill) { struct page_pool_dump_cb *state = (void *)cb->ctx; const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; struct page_pool *pool; int err = 0; rtnl_lock(); mutex_lock(&page_pools_lock); for_each_netdev_dump(net, netdev, state->ifindex) { hlist_for_each_entry(pool, &netdev->page_pools, user.list) { if (state->pp_id && state->pp_id < pool->user.id) continue; state->pp_id = pool->user.id; err = fill(skb, pool, info); if (err) goto out; } state->pp_id = 0; } out: mutex_unlock(&page_pools_lock); rtnl_unlock(); return err; } static int page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info) { #ifdef CONFIG_PAGE_POOL_STATS struct page_pool_stats stats = {}; struct nlattr *nest; void *hdr; if (!page_pool_get_stats(pool, &stats)) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO); if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) || (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX, pool->slow.netdev->ifindex))) goto err_cancel_nest; nla_nest_end(rsp, nest); if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST, stats.alloc_stats.fast) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW, stats.alloc_stats.slow) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER, stats.alloc_stats.slow_high_order) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY, stats.alloc_stats.empty) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL, stats.alloc_stats.refill) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE, stats.alloc_stats.waive) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED, stats.recycle_stats.cached) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL, stats.recycle_stats.cache_full) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING, stats.recycle_stats.ring) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL, stats.recycle_stats.ring_full) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT, 
stats.recycle_stats.released_refcnt)) goto err_cancel_msg; genlmsg_end(rsp, hdr); return 0; err_cancel_nest: nla_nest_cancel(rsp, nest); err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; #else GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS"); return -EOPNOTSUPP; #endif } int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)]; struct nlattr *nest; int err; u32 id; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO)) return -EINVAL; nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO]; err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest, netdev_page_pool_info_nl_policy, info->extack); if (err) return err; if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID)) return -EINVAL; if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NETDEV_A_PAGE_POOL_IFINDEX], "selecting by ifindex not supported"); return -EINVAL; } id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]); return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill); } int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill); } static int page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info) { size_t inflight, refsz; unsigned int napi_id; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id)) goto err_cancel; if (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX, pool->slow.netdev->ifindex)) goto err_cancel; napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0; if (napi_id_valid(napi_id) && nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id)) goto err_cancel; inflight = page_pool_inflight(pool, false); refsz = PAGE_SIZE << pool->p.order; if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT_MEM, inflight * refsz)) goto err_cancel; if (pool->user.detach_time && nla_put_uint(rsp, NETDEV_A_PAGE_POOL_DETACH_TIME, pool->user.detach_time)) goto err_cancel; if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL)) goto err_cancel; genlmsg_end(rsp, hdr); return 0; err_cancel: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_nl_page_pool_event(const struct page_pool *pool, u32 cmd) { struct genl_info info; struct sk_buff *ntf; struct net *net; lockdep_assert_held(&page_pools_lock); /* 'invisible' page pools don't matter */ if (hlist_unhashed(&pool->user.list)) return; net = dev_net(pool->slow.netdev); if (!genl_has_listeners(&netdev_nl_family, net, NETDEV_NLGRP_PAGE_POOL)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (page_pool_nl_fill(ntf, pool, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, net, ntf, 0, NETDEV_NLGRP_PAGE_POOL, GFP_KERNEL); } int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 id; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_ID)) return -EINVAL; id = nla_get_uint(info->attrs[NETDEV_A_PAGE_POOL_ID]); return netdev_nl_page_pool_get_do(info, id, page_pool_nl_fill); } int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_fill); } int page_pool_list(struct 
page_pool *pool) { static u32 id_alloc_next; int err; mutex_lock(&page_pools_lock); err = xa_alloc_cyclic(&page_pools, &pool->user.id, pool, xa_limit_32b, &id_alloc_next, GFP_KERNEL); if (err < 0) goto err_unlock; INIT_HLIST_NODE(&pool->user.list); if (pool->slow.netdev) { hlist_add_head(&pool->user.list, &pool->slow.netdev->page_pools); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_ADD_NTF); } mutex_unlock(&page_pools_lock); return 0; err_unlock: mutex_unlock(&page_pools_lock); return err; } void page_pool_detached(struct page_pool *pool) { mutex_lock(&page_pools_lock); pool->user.detach_time = ktime_get_boottime_seconds(); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); mutex_unlock(&page_pools_lock); } void page_pool_unlist(struct page_pool *pool) { mutex_lock(&page_pools_lock); netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_DEL_NTF); xa_erase(&page_pools, pool->user.id); if (!hlist_unhashed(&pool->user.list)) hlist_del(&pool->user.list); mutex_unlock(&page_pools_lock); } int page_pool_check_memory_provider(struct net_device *dev, struct netdev_rx_queue *rxq) { void *binding = rxq->mp_params.mp_priv; struct page_pool *pool; struct hlist_node *n; if (!binding) return 0; mutex_lock(&page_pools_lock); hlist_for_each_entry_safe(pool, n, &dev->page_pools, user.list) { if (pool->mp_priv != binding) continue; if (pool->slow.queue_idx == get_netdev_rx_queue_index(rxq)) { mutex_unlock(&page_pools_lock); return 0; } } mutex_unlock(&page_pools_lock); return -ENODATA; } static void page_pool_unreg_netdev_wipe(struct net_device *netdev) { struct page_pool *pool; struct hlist_node *n; mutex_lock(&page_pools_lock); hlist_for_each_entry_safe(pool, n, &netdev->page_pools, user.list) { hlist_del_init(&pool->user.list); pool->slow.netdev = NET_PTR_POISON; } mutex_unlock(&page_pools_lock); } static void page_pool_unreg_netdev(struct net_device *netdev) { struct page_pool *pool, *last; struct net_device *lo; lo = dev_net(netdev)->loopback_dev; mutex_lock(&page_pools_lock); last = NULL; hlist_for_each_entry(pool, &netdev->page_pools, user.list) { pool->slow.netdev = lo; netdev_nl_page_pool_event(pool, NETDEV_CMD_PAGE_POOL_CHANGE_NTF); last = pool; } if (last) hlist_splice_init(&netdev->page_pools, &last->user.list, &lo->page_pools); mutex_unlock(&page_pools_lock); } static int page_pool_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; if (hlist_empty(&netdev->page_pools)) return NOTIFY_OK; if (netdev->ifindex != LOOPBACK_IFINDEX) page_pool_unreg_netdev(netdev); else page_pool_unreg_netdev_wipe(netdev); return NOTIFY_OK; } static struct notifier_block page_pool_netdevice_nb = { .notifier_call = page_pool_netdevice_event, }; static int __init page_pool_user_init(void) { return register_netdevice_notifier(&page_pool_netdevice_nb); } subsys_initcall(page_pool_user_init); |
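/*
 * Illustrative sketch only, not part of this file: how a driver would create
 * a page pool tied to its netdev and NAPI instance.  Only pools created with
 * a netdev in their params are hashed by page_pool_list() above and therefore
 * visible through this netlink interface.  The sizing values are assumed
 * examples.
 */
static struct page_pool *example_create_listed_pool(struct net_device *netdev,
						    struct device *dma_dev,
						    struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.netdev		= netdev,	/* makes the pool user-visible */
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}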
#ifndef _LINUX_GENERIC_RADIX_TREE_H #define _LINUX_GENERIC_RADIX_TREE_H /** * DOC: Generic radix trees/sparse arrays * * Very simple and minimalistic, supporting arbitrary size entries up to * GENRADIX_NODE_SIZE. * * A genradix is defined with the type it will store, like so: * * static GENRADIX(struct foo) foo_genradix; * * The main operations are: * * - genradix_init(radix) - initialize an empty genradix * * - genradix_free(radix) - free all memory owned by the genradix and * reinitialize it * * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning * NULL if that entry does not exist * * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry, * allocating it if necessary * * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix * * The radix tree allocates one page of entries at a time, so entries may exist * that were never explicitly allocated - they will be initialized to all * zeroes. * * Internally, a genradix is just a radix tree of pages, and indexing works in * terms of byte offsets. The wrappers in this header file use sizeof on the * type the radix contains to calculate a byte offset from the index - see * __idx_to_offset.
*/ #include <asm/page.h> #include <linux/bug.h> #include <linux/limits.h> #include <linux/log2.h> #include <linux/math.h> #include <linux/slab.h> #include <linux/types.h> struct genradix_root; #define GENRADIX_NODE_SHIFT 9 #define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT) #define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *)) #define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) /* depth that's needed for a genradix that can address up to ULONG_MAX: */ #define GENRADIX_MAX_DEPTH \ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT) #define GENRADIX_DEPTH_MASK \ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) static inline int genradix_depth_shift(unsigned depth) { return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth; } /* * Returns size (of data, in bytes) that a tree of a given depth holds: */ static inline size_t genradix_depth_size(unsigned depth) { return 1UL << genradix_depth_shift(depth); } static inline unsigned genradix_root_to_depth(struct genradix_root *r) { return (unsigned long) r & GENRADIX_DEPTH_MASK; } static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r) { return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK); } struct __genradix { struct genradix_root *root; }; struct genradix_node { union { /* Interior node: */ struct genradix_node *children[GENRADIX_ARY]; /* Leaf: */ u8 data[GENRADIX_NODE_SIZE]; }; }; static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) { return kzalloc(GENRADIX_NODE_SIZE, gfp_mask); } static inline void genradix_free_node(struct genradix_node *node) { kfree(node); } /* * NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE: */ #define __GENRADIX_INITIALIZER \ { \ .tree = { \ .root = NULL, \ } \ } /* * We use a 0 size array to stash the type we're storing without taking any * space at runtime - then the various accessor macros can use typeof() to get * to it for casts/sizeof - we also force the alignment so that storing a type * with a ridiculous alignment doesn't blow up the alignment or size of the * genradix. 
*/ #define GENRADIX(_type) \ struct { \ struct __genradix tree; \ _type type[0] __aligned(1); \ } #define DEFINE_GENRADIX(_name, _type) \ GENRADIX(_type) _name = __GENRADIX_INITIALIZER /** * genradix_init - initialize a genradix * @_radix: genradix to initialize * * Does not fail */ #define genradix_init(_radix) \ do { \ *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \ } while (0) void __genradix_free(struct __genradix *); /** * genradix_free: free all memory owned by a genradix * @_radix: the genradix to free * * After freeing, @_radix will be reinitialized and empty */ #define genradix_free(_radix) __genradix_free(&(_radix)->tree) static inline size_t __idx_to_offset(size_t idx, size_t obj_size) { if (__builtin_constant_p(obj_size)) BUILD_BUG_ON(obj_size > GENRADIX_NODE_SIZE); else BUG_ON(obj_size > GENRADIX_NODE_SIZE); if (!is_power_of_2(obj_size)) { size_t objs_per_page = GENRADIX_NODE_SIZE / obj_size; return (idx / objs_per_page) * GENRADIX_NODE_SIZE + (idx % objs_per_page) * obj_size; } else { return idx * obj_size; } } #define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) #define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) #define __genradix_objs_per_page(_radix) \ (GENRADIX_NODE_SIZE / sizeof((_radix)->type[0])) #define __genradix_page_remainder(_radix) \ (GENRADIX_NODE_SIZE % sizeof((_radix)->type[0])) #define __genradix_idx_to_offset(_radix, _idx) \ __idx_to_offset(_idx, __genradix_obj_size(_radix)) static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset) { struct genradix_root *r = READ_ONCE(radix->root); struct genradix_node *n = genradix_root_to_node(r); unsigned level = genradix_root_to_depth(r); unsigned shift = genradix_depth_shift(level); if (unlikely(ilog2(offset) >= genradix_depth_shift(level))) return NULL; while (n && shift > GENRADIX_NODE_SHIFT) { shift -= GENRADIX_ARY_SHIFT; n = n->children[offset >> shift]; offset &= (1UL << shift) - 1; } return n ? &n->data[offset] : NULL; } #define genradix_ptr_inlined(_radix, _idx) \ (__genradix_cast(_radix) \ __genradix_ptr_inlined(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx))) void *__genradix_ptr(struct __genradix *, size_t); /** * genradix_ptr - get a pointer to a genradix entry * @_radix: genradix to access * @_idx: index to fetch * * Returns a pointer to entry at @_idx, or NULL if that entry does not exist. 
*/ #define genradix_ptr(_radix, _idx) \ (__genradix_cast(_radix) \ __genradix_ptr(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx))) void *__genradix_ptr_alloc(struct __genradix *, size_t, struct genradix_node **, gfp_t); #define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \ (__genradix_cast(_radix) \ (__genradix_ptr_inlined(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx)) ?: \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ NULL, _gfp))) #define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\ (__genradix_cast(_radix) \ (__genradix_ptr_inlined(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx)) ?: \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ _new_node, _gfp))) /** * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it * if necessary * @_radix: genradix to access * @_idx: index to fetch * @_gfp: gfp mask * * Returns a pointer to entry at @_idx, or NULL on allocation failure */ #define genradix_ptr_alloc(_radix, _idx, _gfp) \ (__genradix_cast(_radix) \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ NULL, _gfp)) #define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\ (__genradix_cast(_radix) \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ _new_node, _gfp)) struct genradix_iter { size_t offset; size_t pos; }; /** * genradix_iter_init - initialize a genradix_iter * @_radix: genradix that will be iterated over * @_idx: index to start iterating from */ #define genradix_iter_init(_radix, _idx) \ ((struct genradix_iter) { \ .pos = (_idx), \ .offset = __genradix_idx_to_offset((_radix), (_idx)),\ }) void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); /** * genradix_iter_peek - get first entry at or above iterator's current * position * @_iter: a genradix_iter * @_radix: genradix being iterated over * * If no more entries exist at or above @_iter's current position, returns NULL */ #define genradix_iter_peek(_iter, _radix) \ (__genradix_cast(_radix) \ __genradix_iter_peek(_iter, &(_radix)->tree, \ __genradix_objs_per_page(_radix))) void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *, size_t, size_t); /** * genradix_iter_peek_prev - get first entry at or below iterator's current * position * @_iter: a genradix_iter * @_radix: genradix being iterated over * * If no more entries exist at or below @_iter's current position, returns NULL */ #define genradix_iter_peek_prev(_iter, _radix) \ (__genradix_cast(_radix) \ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \ __genradix_objs_per_page(_radix), \ __genradix_obj_size(_radix) + \ __genradix_page_remainder(_radix))) static inline void __genradix_iter_advance(struct genradix_iter *iter, size_t obj_size) { if (iter->offset + obj_size < iter->offset) { iter->offset = SIZE_MAX; iter->pos = SIZE_MAX; return; } iter->offset += obj_size; if (!is_power_of_2(obj_size) && (iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE) iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE); iter->pos++; } #define genradix_iter_advance(_iter, _radix) \ __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) static inline void __genradix_iter_rewind(struct genradix_iter *iter, size_t obj_size) { if (iter->offset == 0 || iter->offset == SIZE_MAX) { iter->offset = SIZE_MAX; return; } if ((iter->offset & (GENRADIX_NODE_SIZE - 1)) == 0) iter->offset -= GENRADIX_NODE_SIZE % 
obj_size; iter->offset -= obj_size; iter->pos--; } #define genradix_iter_rewind(_iter, _radix) \ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix)) #define genradix_for_each_from(_radix, _iter, _p, _start) \ for (_iter = genradix_iter_init(_radix, _start); \ (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ genradix_iter_advance(&_iter, _radix)) /** * genradix_for_each - iterate over each entry in a genradix * @_radix: genradix to iterate over * @_iter: a genradix_iter to track current position * @_p: pointer to genradix entry type * * On every iteration, @_p will point to the current entry, and @_iter.pos * will be the current entry's index. */ #define genradix_for_each(_radix, _iter, _p) \ genradix_for_each_from(_radix, _iter, _p, 0) #define genradix_last_pos(_radix) \ (SIZE_MAX / GENRADIX_NODE_SIZE * __genradix_objs_per_page(_radix) - 1) /** * genradix_for_each_reverse - iterate over each entry in a genradix, in reverse order * @_radix: genradix to iterate over * @_iter: a genradix_iter to track current position * @_p: pointer to genradix entry type * * On every iteration, @_p will point to the current entry, and @_iter.pos * will be the current entry's index. */ #define genradix_for_each_reverse(_radix, _iter, _p) \ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\ genradix_iter_rewind(&_iter, _radix)) int __genradix_prealloc(struct __genradix *, size_t, gfp_t); /** * genradix_prealloc - preallocate entries in a generic radix tree * @_radix: genradix to preallocate * @_nr: number of entries to preallocate * @_gfp: gfp mask * * Returns 0 on success, -ENOMEM on failure */ #define genradix_prealloc(_radix, _nr, _gfp) \ __genradix_prealloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _nr + 1),\ _gfp) #endif /* _LINUX_GENERIC_RADIX_TREE_H */
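/*
 * Editor's note (not part of the header above): a minimal usage sketch of the
 * genradix API documented in the DOC block at the top of this header.  The
 * "struct foo" type and foo_example() function are illustrative names only.
 */
#include <linux/generic-radix-tree.h>
#include <linux/printk.h>

struct foo {
	int x;
};

static GENRADIX(struct foo) foo_genradix;

static int foo_example(void)
{
	struct genradix_iter iter;
	struct foo *p;

	genradix_init(&foo_genradix);

	/* Get a pointer to index 1000, allocating the backing page if needed. */
	p = genradix_ptr_alloc(&foo_genradix, 1000, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->x = 42;

	/* genradix_ptr() never allocates: an unpopulated index returns NULL. */
	if (!genradix_ptr(&foo_genradix, 123456))
		pr_info("index 123456 is not allocated\n");

	/*
	 * Iteration visits every entry on allocated pages, including entries
	 * that were never explicitly written (they read back as all zeroes).
	 */
	genradix_for_each(&foo_genradix, iter, p)
		if (p->x)
			pr_info("idx %zu: x=%d\n", iter.pos, p->x);

	genradix_free(&foo_genradix);	/* frees all pages, leaves it reusable */
	return 0;
}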
// SPDX-License-Identifier: GPL-2.0-only /* * IBSS mode implementation * Copyright 2003-2008, Jouni Malinen <j@w1.fi> * Copyright 2004, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007, Michael Wu <flamingice@sourmilk.net> * Copyright 2009, Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH * Copyright(c) 2018-2024 Intel Corporation */ #include <linux/delay.h> #include <linux/slab.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #define IEEE80211_SCAN_INTERVAL (2 * HZ) #define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) #define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) #define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) #define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ) #define IEEE80211_IBSS_MAX_STA_ENTRIES 128 static struct beacon_data * ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, const int beacon_int, const u32 basic_rates, const u16 capability, u64 tsf, struct cfg80211_chan_def *chandef, bool *have_higher_than_11mbit, struct cfg80211_csa_settings *csa_settings) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int rates_n = 0, i, ri; struct ieee80211_mgmt *mgmt; u8 *pos; struct ieee80211_supported_band *sband; u32 rates = 0, rates_added = 0; struct beacon_data *presp; int frame_len; /* Build IBSS probe response */ frame_len = sizeof(struct ieee80211_hdr_3addr) + 12 /* struct ieee80211_mgmt.u.beacon */ + 2 + IEEE80211_MAX_SSID_LEN /* max SSID */ + 2 + 8 /* max Supported Rates */ + 3 /* max DS params */ + 4 /* IBSS params */ + 5 /* Channel Switch Announcement */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + 2 + sizeof(struct ieee80211_vht_cap) + 2 + sizeof(struct ieee80211_vht_operation) + ifibss->ie_len; presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); if (!presp) return NULL; presp->head = (void *)(presp + 1); mgmt = (void *) presp->head; mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int); mgmt->u.beacon.timestamp =
cpu_to_le64(tsf); mgmt->u.beacon.capab_info = cpu_to_le16(capability); pos = (u8 *)mgmt + offsetof(struct ieee80211_mgmt, u.beacon.variable); *pos++ = WLAN_EID_SSID; *pos++ = ifibss->ssid_len; memcpy(pos, ifibss->ssid, ifibss->ssid_len); pos += ifibss->ssid_len; sband = local->hw.wiphy->bands[chandef->chan->band]; rates_n = 0; if (have_higher_than_11mbit) *have_higher_than_11mbit = false; for (i = 0; i < sband->n_bitrates; i++) { if (sband->bitrates[i].bitrate > 110 && have_higher_than_11mbit) *have_higher_than_11mbit = true; rates |= BIT(i); rates_n++; } *pos++ = WLAN_EID_SUPP_RATES; *pos++ = min_t(int, 8, rates_n); for (ri = 0; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; if (++rates_added == 8) { ri++; /* continue at next rate for EXT_SUPP_RATES */ break; } } if (sband->band == NL80211_BAND_2GHZ) { *pos++ = WLAN_EID_DS_PARAMS; *pos++ = 1; *pos++ = ieee80211_frequency_to_channel( chandef->chan->center_freq); } *pos++ = WLAN_EID_IBSS_PARAMS; *pos++ = 2; /* FIX: set ATIM window based on scan results */ *pos++ = 0; *pos++ = 0; if (csa_settings) { *pos++ = WLAN_EID_CHANNEL_SWITCH; *pos++ = 3; *pos++ = csa_settings->block_tx ? 1 : 0; *pos++ = ieee80211_frequency_to_channel( csa_settings->chandef.chan->center_freq); presp->cntdwn_counter_offsets[0] = (pos - presp->head); *pos++ = csa_settings->count; presp->cntdwn_current_counter = csa_settings->count; } /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */ if (rates_n > 8) { *pos++ = WLAN_EID_EXT_SUPP_RATES; *pos++ = rates_n - 8; for (; ri < sband->n_bitrates; ri++) { int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate, 5); u8 basic = 0; if (!(rates & BIT(ri))) continue; if (basic_rates & BIT(ri)) basic = 0x80; *pos++ = basic | (u8) rate; } } if (ifibss->ie_len) { memcpy(pos, ifibss->ie, ifibss->ie_len); pos += ifibss->ie_len; } /* add HT capability and information IEs */ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_5 && chandef->width != NL80211_CHAN_WIDTH_10 && sband->ht_cap.ht_supported) { struct ieee80211_sta_ht_cap ht_cap; memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); ieee80211_apply_htcap_overrides(sdata, &ht_cap); pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap); /* * Note: According to 802.11n-2009 9.13.3.1, HT Protection * field and RIFS Mode are reserved in IBSS mode, therefore * keep them at 0 */ pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap, chandef, 0, false); /* add VHT capability and information IEs */ if (chandef->width != NL80211_CHAN_WIDTH_20 && chandef->width != NL80211_CHAN_WIDTH_40 && sband->vht_cap.vht_supported) { pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap, sband->vht_cap.cap); pos = ieee80211_ie_build_vht_oper(pos, &sband->vht_cap, chandef); } } if (local->hw.queues >= IEEE80211_NUM_ACS) pos = ieee80211_add_wmm_info_ie(pos, 0); /* U-APSD not in use */ presp->head_len = pos - presp->head; if (WARN_ON(presp->head_len > frame_len)) goto error; return presp; error: kfree(presp); return NULL; } static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const int beacon_int, struct cfg80211_chan_def *req_chandef, const u32 basic_rates, const u16 capability, u64 tsf, bool creator) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; u64 bss_change; struct 
ieee80211_chan_req chanreq = {}; struct ieee80211_channel *chan; struct beacon_data *presp; struct cfg80211_inform_bss bss_meta = {}; bool have_higher_than_11mbit; bool radar_required; int err; lockdep_assert_wiphy(local->hw.wiphy); /* Reset own TSF to allow time synchronization work. */ drv_reset_tsf(local, sdata); if (!ether_addr_equal(ifibss->bssid, bssid)) sta_info_flush(sdata, -1); /* if merging, indicate to driver that we leave the old IBSS */ if (sdata->vif.cfg.ibss_joined) { sdata->vif.cfg.ibss_joined = false; sdata->vif.cfg.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; netif_carrier_off(sdata->dev); synchronize_net(); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS | BSS_CHANGED_BEACON_ENABLED); drv_leave_ibss(local, sdata); } presp = sdata_dereference(ifibss->presp, sdata); RCU_INIT_POINTER(ifibss->presp, NULL); if (presp) kfree_rcu(presp, rcu_head); /* make a copy of the chandef, it could be modified below. */ chanreq.oper = *req_chandef; chan = chanreq.oper.chan; if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper, NL80211_IFTYPE_ADHOC)) { if (chanreq.oper.width == NL80211_CHAN_WIDTH_5 || chanreq.oper.width == NL80211_CHAN_WIDTH_10 || chanreq.oper.width == NL80211_CHAN_WIDTH_20_NOHT || chanreq.oper.width == NL80211_CHAN_WIDTH_20) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } chanreq.oper.width = NL80211_CHAN_WIDTH_20; chanreq.oper.center_freq1 = chan->center_freq; /* check again for downgraded chandef */ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chanreq.oper, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "Failed to join IBSS, beacons forbidden\n"); return; } } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &chanreq.oper, NL80211_IFTYPE_ADHOC); if (err < 0) { sdata_info(sdata, "Failed to join IBSS, invalid chandef\n"); return; } if (err > 0 && !ifibss->userspace_handles_dfs) { sdata_info(sdata, "Failed to join IBSS, DFS channel without control program\n"); return; } radar_required = err; if (ieee80211_link_use_channel(&sdata->deflink, &chanreq, ifibss->fixed_channel ? IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE)) { sdata_info(sdata, "Failed to join IBSS, no channel context\n"); return; } sdata->deflink.radar_required = radar_required; memcpy(ifibss->bssid, bssid, ETH_ALEN); presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates, capability, tsf, &chanreq.oper, &have_higher_than_11mbit, NULL); if (!presp) return; rcu_assign_pointer(ifibss->presp, presp); mgmt = (void *)presp->head; sdata->vif.bss_conf.enable_beacon = true; sdata->vif.bss_conf.beacon_int = beacon_int; sdata->vif.bss_conf.basic_rates = basic_rates; sdata->vif.cfg.ssid_len = ifibss->ssid_len; memcpy(sdata->vif.cfg.ssid, ifibss->ssid, ifibss->ssid_len); bss_change = BSS_CHANGED_BEACON_INT; bss_change |= ieee80211_reset_erp_info(sdata); bss_change |= BSS_CHANGED_BSSID; bss_change |= BSS_CHANGED_BEACON; bss_change |= BSS_CHANGED_BEACON_ENABLED; bss_change |= BSS_CHANGED_BASIC_RATES; bss_change |= BSS_CHANGED_HT; bss_change |= BSS_CHANGED_IBSS; bss_change |= BSS_CHANGED_SSID; /* * In 5 GHz/802.11a, we can always use short slot time. * (IEEE 802.11-2012 18.3.8.7) * * In 2.4GHz, we must always use long slots in IBSS for compatibility * reasons. * (IEEE 802.11-2012 19.4.5) * * HT follows these specifications (IEEE 802.11-2012 20.3.18) */ sdata->vif.bss_conf.use_short_slot = chan->band == NL80211_BAND_5GHZ; bss_change |= BSS_CHANGED_ERP_SLOT; /* cf. 
IEEE 802.11 9.2.12 */ sdata->deflink.operating_11g_mode = chan->band == NL80211_BAND_2GHZ && have_higher_than_11mbit; ieee80211_set_wmm_default(&sdata->deflink, true, false); sdata->vif.cfg.ibss_joined = true; sdata->vif.cfg.ibss_creator = creator; err = drv_join_ibss(local, sdata); if (err) { sdata->vif.cfg.ibss_joined = false; sdata->vif.cfg.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.cfg.ssid_len = 0; RCU_INIT_POINTER(ifibss->presp, NULL); kfree_rcu(presp, rcu_head); ieee80211_link_release_channel(&sdata->deflink); sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n", err); return; } ieee80211_bss_info_change_notify(sdata, bss_change); ifibss->state = IEEE80211_IBSS_MLME_JOINED; mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); bss_meta.chan = chan; bss = cfg80211_inform_bss_frame_data(local->hw.wiphy, &bss_meta, mgmt, presp->head_len, GFP_KERNEL); cfg80211_put_bss(local->hw.wiphy, bss); netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL); } static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss *bss) { struct cfg80211_bss *cbss = container_of((void *)bss, struct cfg80211_bss, priv); struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; u32 basic_rates; int i, j; u16 beacon_int = cbss->beacon_interval; const struct cfg80211_bss_ies *ies; enum nl80211_channel_type chan_type; u64 tsf; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (beacon_int < 10) beacon_int = 10; switch (sdata->u.ibss.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef); cfg80211_chandef_create(&chandef, cbss->channel, chan_type); break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_NO_HT); chandef.width = sdata->u.ibss.chandef.width; break; case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: chandef = sdata->u.ibss.chandef; chandef.chan = cbss->channel; break; default: /* fall back to 20 MHz for unsupported modes */ cfg80211_chandef_create(&chandef, cbss->channel, NL80211_CHAN_NO_HT); break; } sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; basic_rates = 0; for (i = 0; i < bss->supp_rates_len; i++) { int rate = bss->supp_rates[i] & 0x7f; bool is_basic = !!(bss->supp_rates[i] & 0x80); for (j = 0; j < sband->n_bitrates; j++) { int brate; brate = DIV_ROUND_UP(sband->bitrates[j].bitrate, 5); if (brate == rate) { if (is_basic) basic_rates |= BIT(j); break; } } } rcu_read_lock(); ies = rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); __ieee80211_sta_join_ibss(sdata, cbss->bssid, beacon_int, &chandef, basic_rates, cbss->capability, tsf, false); } int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings, u64 *changed) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct beacon_data *presp, *old_presp; struct cfg80211_bss *cbss; const struct cfg80211_bss_ies *ies; u16 capability = WLAN_CAPABILITY_IBSS; u64 tsf; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (unlikely(!cbss)) return -EINVAL; rcu_read_lock(); ies = 
rcu_dereference(cbss->ies); tsf = ies->tsf; rcu_read_unlock(); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); old_presp = sdata_dereference(ifibss->presp, sdata); presp = ieee80211_ibss_build_presp(sdata, sdata->vif.bss_conf.beacon_int, sdata->vif.bss_conf.basic_rates, capability, tsf, &ifibss->chandef, NULL, csa_settings); if (!presp) return -ENOMEM; rcu_assign_pointer(ifibss->presp, presp); if (old_presp) kfree_rcu(old_presp, rcu_head); *changed |= BSS_CHANGED_BEACON; return 0; } int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct cfg80211_bss *cbss; lockdep_assert_wiphy(sdata->local->hw.wiphy); /* When not connected/joined, sending CSA doesn't make sense. */ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) return -ENOLINK; /* update cfg80211 bss information with the new channel */ if (!is_zero_ether_addr(ifibss->bssid)) { cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); /* XXX: should not really modify cfg80211 data */ if (cbss) { cbss->channel = sdata->deflink.csa.chanreq.oper.chan; cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->chandef = sdata->deflink.csa.chanreq.oper; /* generate the beacon */ return ieee80211_ibss_csa_beacon(sdata, NULL, changed); } void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; wiphy_work_cancel(sdata->local->hw.wiphy, &ifibss->csa_connection_drop_work); } static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) __acquires(RCU) { struct ieee80211_sub_if_data *sdata = sta->sdata; u8 addr[ETH_ALEN]; memcpy(addr, sta->sta.addr, ETH_ALEN); ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr); sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); /* authorize the station only if the network is not RSN protected. If * not wait for the userspace to authorize it */ if (!sta->sdata->u.ibss.control_port) sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); rate_control_rate_init(&sta->deflink); /* If it fails, maybe we raced another insertion? */ if (sta_info_insert_rcu(sta)) return sta_info_get(sdata, addr); return sta; } static struct sta_info * ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) __acquires(RCU) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; int band; /* * XXX: Consider removing the least recently used entry and * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); rcu_read_lock(); return NULL; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) { rcu_read_lock(); return NULL; } if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) { rcu_read_lock(); return NULL; } rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) return NULL; band = chanctx_conf->def.chan->band; rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_KERNEL); if (!sta) { rcu_read_lock(); return NULL; } /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband); return ieee80211_ibss_finish_sta(sta); } static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; int active = 0; struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); rcu_read_lock(); list_for_each_entry_rcu(sta, &local->sta_list, list) { unsigned long last_active = ieee80211_sta_last_active(sta, -1); if (sta->sdata == sdata && time_is_after_jiffies(last_active + IEEE80211_IBSS_MERGE_INTERVAL)) { active++; break; } } rcu_read_unlock(); return active; } static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct beacon_data *presp; struct sta_info *sta; lockdep_assert_wiphy(local->hw.wiphy); if (!is_zero_ether_addr(ifibss->bssid)) { cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (cbss) { cfg80211_unlink_bss(local->hw.wiphy, cbss); cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } ifibss->state = IEEE80211_IBSS_MLME_SEARCH; sta_info_flush(sdata, -1); spin_lock_bh(&ifibss->incomplete_lock); while (!list_empty(&ifibss->incomplete_stations)) { sta = list_first_entry(&ifibss->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifibss->incomplete_lock); sta_info_free(local, sta); spin_lock_bh(&ifibss->incomplete_lock); } spin_unlock_bh(&ifibss->incomplete_lock); netif_carrier_off(sdata->dev); sdata->vif.cfg.ibss_joined = false; sdata->vif.cfg.ibss_creator = false; sdata->vif.bss_conf.enable_beacon = false; sdata->vif.cfg.ssid_len = 0; /* remove beacon */ presp = sdata_dereference(ifibss->presp, sdata); RCU_INIT_POINTER(sdata->u.ibss.presp, NULL); if (presp) kfree_rcu(presp, rcu_head); clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_IBSS); drv_leave_ibss(local, sdata); ieee80211_link_release_channel(&sdata->deflink); } static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, u.ibss.csa_connection_drop_work); ieee80211_ibss_disconnect(sdata); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* trigger a scan to find another IBSS network to join */ wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } static void ieee80211_ibss_csa_mark_radar(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; int err; /* if the current channel is a DFS channel, mark the channel as * 
unavailable. */ err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &ifibss->chandef, NL80211_IFTYPE_ADHOC); if (err > 0) cfg80211_radar_event(sdata->local->hw.wiphy, &ifibss->chandef, GFP_ATOMIC); } static bool ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, struct ieee802_11_elems *elems, bool beacon) { struct cfg80211_csa_settings params; struct ieee80211_csa_ie csa_ie; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; enum nl80211_channel_type ch_type; int err; struct ieee80211_conn_settings conn = { .mode = IEEE80211_CONN_MODE_HT, .bw_limit = IEEE80211_CONN_BW_LIMIT_40, }; u32 vht_cap_info = 0; lockdep_assert_wiphy(sdata->local->hw.wiphy); switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: conn.mode = IEEE80211_CONN_MODE_LEGACY; fallthrough; case NL80211_CHAN_WIDTH_20: conn.bw_limit = IEEE80211_CONN_BW_LIMIT_20; break; default: break; } if (elems->vht_cap_elem) vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); memset(&params, 0, sizeof(params)); err = ieee80211_parse_ch_switch_ie(sdata, elems, ifibss->chandef.chan->band, vht_cap_info, &conn, ifibss->bssid, false, &csa_ie); /* can't switch to destination channel, fail */ if (err < 0) goto disconnect; /* did not contain a CSA */ if (err) return false; /* channel switch is not supported, disconnect */ if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) goto disconnect; params.count = csa_ie.count; params.chandef = csa_ie.chanreq.oper; switch (ifibss->chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_40: /* keep our current HT mode (HT20/HT40+/HT40-), even if * another mode has been announced. The mode is not adopted * within the beacon while doing CSA and we should therefore * keep the mode which we announce. */ ch_type = cfg80211_get_chandef_type(&ifibss->chandef); cfg80211_chandef_create(&params.chandef, params.chandef.chan, ch_type); break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: if (params.chandef.width != ifibss->chandef.width) { sdata_info(sdata, "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } break; default: /* should not happen, conn_flags should prevent VHT modes. */ WARN_ON(1); goto disconnect; } if (!cfg80211_reg_can_beacon(sdata->local->hw.wiphy, &params.chandef, NL80211_IFTYPE_ADHOC)) { sdata_info(sdata, "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", ifibss->bssid, params.chandef.chan->center_freq, params.chandef.width, params.chandef.center_freq1, params.chandef.center_freq2); goto disconnect; } err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy, &params.chandef, NL80211_IFTYPE_ADHOC); if (err < 0) goto disconnect; if (err > 0 && !ifibss->userspace_handles_dfs) { /* IBSS-DFS only allowed with a control program */ goto disconnect; } params.radar_required = err; if (cfg80211_chandef_identical(&params.chandef, &sdata->vif.bss_conf.chanreq.oper)) { ibss_dbg(sdata, "received csa with an identical chandef, ignoring\n"); return true; } /* all checks done, now perform the channel switch.
*/ ibss_dbg(sdata, "received channel switch announcement to go to channel %d MHz\n", params.chandef.chan->center_freq); params.block_tx = !!csa_ie.mode; if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev, &params)) goto disconnect; ieee80211_ibss_csa_mark_radar(sdata); return true; disconnect: ibss_dbg(sdata, "Can't handle channel switch, disconnect\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifibss->csa_connection_drop_work); ieee80211_ibss_csa_mark_radar(sdata); return true; } static void ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems) { int required_len; if (len < IEEE80211_MIN_ACTION_SIZE + 1) return; /* CSA is the only action we handle for now */ if (mgmt->u.action.u.measurement.action_code != WLAN_ACTION_SPCT_CHL_SWITCH) return; required_len = IEEE80211_MIN_ACTION_SIZE + sizeof(mgmt->u.action.u.chan_switch); if (len < required_len) return; if (!sdata->vif.bss_conf.csa_active) ieee80211_ibss_process_chanswitch(sdata, elems, false); } static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 reason = le16_to_cpu(mgmt->u.deauth.reason_code); if (len < IEEE80211_DEAUTH_FRAME_LEN) return; ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason); sta_info_destroy_addr(sdata, mgmt->sa); } static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { u16 auth_alg, auth_transaction; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (len < 24 + 6) return; auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n", mgmt->bssid, auth_transaction); if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) return; /* * IEEE 802.11 standard does not require authentication in IBSS * networks and most implementations do not seem to use it. * However, try to reply to authentication attempts if someone * has actually implemented this.
*/ ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, 0, NULL, 0, mgmt->sa, sdata->u.ibss.bssid, NULL, 0, 0, 0); } static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems *elems, struct ieee80211_channel *channel) { struct sta_info *sta; enum nl80211_band band = rx_status->band; struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; bool rates_updated = false; u32 supp_rates = 0; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) return; if (!ether_addr_equal(mgmt->bssid, sdata->u.ibss.bssid)) return; sband = local->hw.wiphy->bands[band]; if (WARN_ON(!sband)) return; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->sa); if (elems->supp_rates) { supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); if (sta) { u32 prev_rates; prev_rates = sta->sta.deflink.supp_rates[band]; sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband); if (sta->sta.deflink.supp_rates[band] != prev_rates) { ibss_dbg(sdata, "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n", sta->sta.addr, prev_rates, sta->sta.deflink.supp_rates[band]); rates_updated = true; } } else { rcu_read_unlock(); sta = ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); } } if (sta && !sta->sta.wme && (elems->wmm_info || elems->s1g_capab) && local->hw.queues >= IEEE80211_NUM_ACS) { sta->sta.wme = true; ieee80211_check_fast_xmit(sta); } if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_5 && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_10) { /* we both use HT */ struct ieee80211_ht_cap htcap_ie; struct cfg80211_chan_def chandef; enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth; cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); ieee80211_chandef_ht_oper(elems->ht_operation, &chandef); memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie)); rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, &htcap_ie, &sta->deflink); if (elems->vht_operation && elems->vht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20 && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_40) { /* we both use VHT */ struct ieee80211_vht_cap cap_ie; struct ieee80211_sta_vht_cap cap = sta->sta.deflink.vht_cap; u32 vht_cap_info = le32_to_cpu(elems->vht_cap_elem->vht_cap_info); ieee80211_chandef_vht_oper(&local->hw, vht_cap_info, elems->vht_operation, elems->ht_operation, &chandef); memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie)); ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, &cap_ie, NULL, &sta->deflink); if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap))) rates_updated |= true; } if (bw != sta->sta.deflink.bandwidth) rates_updated |= true; if (!cfg80211_chandef_compatible(&sdata->u.ibss.chandef, &chandef)) WARN_ON_ONCE(1); } if (sta && rates_updated) { u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED; u8 rx_nss = sta->sta.deflink.rx_nss; /* Force rx_nss recalculation */ sta->sta.deflink.rx_nss = 0; rate_control_rate_init(&sta->deflink); if (sta->sta.deflink.rx_nss != rx_nss) changed |= IEEE80211_RC_NSS_CHANGED; drv_link_sta_rc_update(local, sdata, &sta->sta.deflink, changed); } rcu_read_unlock(); } static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status, struct ieee802_11_elems 
*elems) { struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_bss *bss; struct ieee80211_channel *channel; u64 beacon_timestamp, rx_timestamp; u32 supp_rates = 0; enum nl80211_band band = rx_status->band; channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq); if (!channel) return; ieee80211_update_sta_info(sdata, mgmt, len, rx_status, elems, channel); bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, channel); if (!bss) return; cbss = container_of((void *)bss, struct cfg80211_bss, priv); /* same for beacon and probe response */ beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp); /* check if we need to merge IBSS */ /* not an IBSS */ if (!(cbss->capability & WLAN_CAPABILITY_IBSS)) goto put_bss; /* different channel */ if (sdata->u.ibss.fixed_channel && sdata->u.ibss.chandef.chan != cbss->channel) goto put_bss; /* different SSID */ if (elems->ssid_len != sdata->u.ibss.ssid_len || memcmp(elems->ssid, sdata->u.ibss.ssid, sdata->u.ibss.ssid_len)) goto put_bss; /* process channel switch */ if (sdata->vif.bss_conf.csa_active || ieee80211_ibss_process_chanswitch(sdata, elems, true)) goto put_bss; /* same BSSID */ if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid)) goto put_bss; /* we use a fixed BSSID */ if (sdata->u.ibss.fixed_bssid) goto put_bss; if (ieee80211_have_rx_timestamp(rx_status)) { /* time when timestamp field was received */ rx_timestamp = ieee80211_calculate_rx_timestamp(local, rx_status, len + FCS_LEN, 24); } else { /* * second best option: get current TSF * (will return -1 if not supported) */ rx_timestamp = drv_get_tsf(local, sdata); } ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n", mgmt->sa, mgmt->bssid, (unsigned long long)rx_timestamp); ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n", (unsigned long long)beacon_timestamp, (unsigned long long)(rx_timestamp - beacon_timestamp), jiffies); if (beacon_timestamp > rx_timestamp) { ibss_dbg(sdata, "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n", mgmt->bssid); ieee80211_sta_join_ibss(sdata, bss); supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL); ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates); rcu_read_unlock(); } put_bss: ieee80211_rx_bss_put(local, bss); } void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid, const u8 *addr, u32 supp_rates) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_supported_band *sband; int band; /* * XXX: Consider removing the least recently used entry and * allow new one to be added. 
*/ if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) { net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n", sdata->name, addr); return; } if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH) return; if (!ether_addr_equal(bssid, sdata->u.ibss.bssid)) return; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (WARN_ON_ONCE(!chanctx_conf)) { rcu_read_unlock(); return; } band = chanctx_conf->def.chan->band; rcu_read_unlock(); sta = sta_info_alloc(sdata, addr, GFP_ATOMIC); if (!sta) return; /* make sure mandatory rates are always added */ sband = local->hw.wiphy->bands[band]; sta->sta.deflink.supp_rates[band] = supp_rates | ieee80211_mandatory_rates(sband); spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); spin_unlock(&ifibss->incomplete_lock); wiphy_work_queue(local->hw.wiphy, &sdata->work); } static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct sta_info *sta, *tmp; unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT; unsigned long exp_rsn = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry_safe(sta, tmp, &local->sta_list, list) { unsigned long last_active = ieee80211_sta_last_active(sta, -1); if (sdata != sta->sdata) continue; if (time_is_before_jiffies(last_active + exp_time) || (time_is_before_jiffies(last_active + exp_rsn) && sta->sta_state != IEEE80211_STA_AUTHORIZED)) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n", sta->sta_state != IEEE80211_STA_AUTHORIZED ? "not authorized " : "", sta->sta.addr); ieee80211_send_deauth_disassoc(sdata, sta->sta.addr, ifibss->bssid, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, frame_buf); WARN_ON(__sta_info_destroy(sta)); } } } /* * This function is called with state == IEEE80211_IBSS_MLME_JOINED */ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; lockdep_assert_wiphy(sdata->local->hw.wiphy); mod_timer(&ifibss->timer, round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL)); ieee80211_ibss_sta_expire(sdata); if (time_before(jiffies, ifibss->last_scan_completed + IEEE80211_IBSS_MERGE_INTERVAL)) return; if (ieee80211_sta_active_ibss(sdata)) return; if (ifibss->fixed_channel) return; sdata_info(sdata, "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n"); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, NULL, 0); } static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; u8 bssid[ETH_ALEN]; u16 capability; int i; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (ifibss->fixed_bssid) { memcpy(bssid, ifibss->bssid, ETH_ALEN); } else { /* Generate random, not broadcast, locally administered BSSID. Mix in * own MAC address to make sure that devices that do not have proper * random number generator get different BSSID. 
*/ get_random_bytes(bssid, ETH_ALEN); for (i = 0; i < ETH_ALEN; i++) bssid[i] ^= sdata->vif.addr[i]; bssid[0] &= ~0x01; bssid[0] |= 0x02; } sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid); capability = WLAN_CAPABILITY_IBSS; if (ifibss->privacy) capability |= WLAN_CAPABILITY_PRIVACY; __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, &ifibss->chandef, ifibss->basic_rates, capability, 0, true); } static unsigned int ibss_setup_channels(struct wiphy *wiphy, struct ieee80211_channel **channels, unsigned int channels_max, u32 center_freq, u32 width) { struct ieee80211_channel *chan = NULL; unsigned int n_chan = 0; u32 start_freq, end_freq, freq; if (width <= 20) { start_freq = center_freq; end_freq = center_freq; } else { start_freq = center_freq - width / 2 + 10; end_freq = center_freq + width / 2 - 10; } for (freq = start_freq; freq <= end_freq; freq += 20) { chan = ieee80211_get_channel(wiphy, freq); if (!chan) continue; if (n_chan >= channels_max) return n_chan; channels[n_chan] = chan; n_chan++; } return n_chan; } static unsigned int ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, struct ieee80211_channel **channels, unsigned int channels_max) { unsigned int n_chan = 0; u32 width, cf1, cf2 = 0; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: width = 40; break; case NL80211_CHAN_WIDTH_80P80: cf2 = chandef->center_freq2; fallthrough; case NL80211_CHAN_WIDTH_80: width = 80; break; case NL80211_CHAN_WIDTH_160: width = 160; break; default: width = 20; break; } cf1 = chandef->center_freq1; n_chan = ibss_setup_channels(wiphy, channels, channels_max, cf1, width); if (cf2) n_chan += ibss_setup_channels(wiphy, &channels[n_chan], channels_max - n_chan, cf2, width); return n_chan; } /* * This function is called with state == IEEE80211_IBSS_MLME_SEARCH */ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; struct cfg80211_bss *cbss; struct ieee80211_channel *chan = NULL; const u8 *bssid = NULL; int active_ibss; lockdep_assert_wiphy(sdata->local->hw.wiphy); active_ibss = ieee80211_sta_active_ibss(sdata); ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss); if (active_ibss) return; if (ifibss->fixed_bssid) bssid = ifibss->bssid; if (ifibss->fixed_channel) chan = ifibss->chandef.chan; if (!is_zero_ether_addr(ifibss->bssid)) bssid = ifibss->bssid; cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid, ifibss->ssid, ifibss->ssid_len, IEEE80211_BSS_TYPE_IBSS, IEEE80211_PRIVACY(ifibss->privacy)); if (cbss) { struct ieee80211_bss *bss; bss = (void *)cbss->priv; ibss_dbg(sdata, "sta_find_ibss: selected %pM current %pM\n", cbss->bssid, ifibss->bssid); sdata_info(sdata, "Selected IBSS BSSID %pM based on configured SSID\n", cbss->bssid); ieee80211_sta_join_ibss(sdata, bss); ieee80211_rx_bss_put(local, bss); return; } /* if a fixed bssid and a fixed freq have been provided create the IBSS * directly and do not waste time scanning */ if (ifibss->fixed_bssid && ifibss->fixed_channel) { sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n", bssid); ieee80211_sta_create_ibss(sdata); return; } ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n"); /* Selected IBSS not found in current scan results - try to scan */ if (time_after(jiffies, ifibss->last_scan_completed + IEEE80211_SCAN_INTERVAL)) { struct ieee80211_channel *channels[8]; unsigned int num; sdata_info(sdata, "Trigger 
new scan to find an IBSS to join\n"); if (ifibss->fixed_channel) { num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy, &ifibss->chandef, channels, ARRAY_SIZE(channels)); ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, channels, num); } else { ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len, NULL, 0); } } else { int interval = IEEE80211_SCAN_INTERVAL; if (time_after(jiffies, ifibss->ibss_join_req + IEEE80211_IBSS_JOIN_TIMEOUT)) ieee80211_sta_create_ibss(sdata); mod_timer(&ifibss->timer, round_jiffies(jiffies + interval)); } } static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, struct sk_buff *req) { struct ieee80211_mgmt *mgmt = (void *)req->data; struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct ieee80211_local *local = sdata->local; int tx_last_beacon, len = req->len; struct sk_buff *skb; struct beacon_data *presp; u8 *pos, *end; lockdep_assert_wiphy(sdata->local->hw.wiphy); presp = sdata_dereference(ifibss->presp, sdata); if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || len < 24 + 2 || !presp) return; tx_last_beacon = drv_tx_last_beacon(local); ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da); ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n", mgmt->bssid, tx_last_beacon); if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da)) return; if (!ether_addr_equal(mgmt->bssid, ifibss->bssid) && !is_broadcast_ether_addr(mgmt->bssid)) return; end = ((u8 *) mgmt) + len; pos = mgmt->u.probe_req.variable; if (pos[0] != WLAN_EID_SSID || pos + 2 + pos[1] > end) { ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n", mgmt->sa); return; } if (pos[1] != 0 && (pos[1] != ifibss->ssid_len || memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) { /* Ignore ProbeReq for foreign SSID */ return; } /* Reply with ProbeResp */ skb = dev_alloc_skb(local->tx_headroom + presp->head_len); if (!skb) return; skb_reserve(skb, local->tx_headroom); skb_put_data(skb, presp->head, presp->head_len); memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN); ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa); IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; /* avoid excessive retries for probe request to wildcard SSIDs */ if (pos[1] == 0) IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK; ieee80211_tx_skb(sdata, skb); } static void ieee80211_rx_mgmt_probe_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status) { size_t baselen; struct ieee802_11_elems *elems; BUILD_BUG_ON(offsetof(typeof(mgmt->u.probe_resp), variable) != offsetof(typeof(mgmt->u.beacon), variable)); /* * either beacon or probe_resp but the variable field is at the * same offset */ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; if (baselen > len) return; elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, false, NULL); if (elems) { ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, elems); kfree(elems); } } void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; u16 fc; struct ieee802_11_elems *elems; int ies_len; rx_status = IEEE80211_SKB_RXCB(skb); mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control); if (!sdata->u.ibss.ssid_len) return; /* not ready to merge yet */ switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_PROBE_REQ: ieee80211_rx_mgmt_probe_req(sdata, skb); break; case 
IEEE80211_STYPE_PROBE_RESP: case IEEE80211_STYPE_BEACON: ieee80211_rx_mgmt_probe_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_AUTH: ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: switch (mgmt->u.action.category) { case WLAN_CATEGORY_SPECTRUM_MGMT: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); if (ies_len < 0) break; elems = ieee802_11_parse_elems( mgmt->u.action.u.chan_switch.variable, ies_len, true, NULL); if (elems && !elems->parse_error) ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len, rx_status, elems); kfree(elems); break; } } } void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct sta_info *sta; /* * Work could be scheduled after scan or similar * when we aren't even joined (or trying) with a * network. */ if (!ifibss->ssid_len) return; spin_lock_bh(&ifibss->incomplete_lock); while (!list_empty(&ifibss->incomplete_stations)) { sta = list_first_entry(&ifibss->incomplete_stations, struct sta_info, list); list_del(&sta->list); spin_unlock_bh(&ifibss->incomplete_lock); ieee80211_ibss_finish_sta(sta); rcu_read_unlock(); spin_lock_bh(&ifibss->incomplete_lock); } spin_unlock_bh(&ifibss->incomplete_lock); switch (ifibss->state) { case IEEE80211_IBSS_MLME_SEARCH: ieee80211_sta_find_ibss(sdata); break; case IEEE80211_IBSS_MLME_JOINED: ieee80211_sta_merge_ibss(sdata); break; default: WARN_ON(1); break; } } static void ieee80211_ibss_timer(struct timer_list *t) { struct ieee80211_sub_if_data *sdata = timer_container_of(sdata, t, u.ibss.timer); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); wiphy_work_init(&ifibss->csa_connection_drop_work, ieee80211_csa_connection_drop_work); } /* scan finished notification */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type != NL80211_IFTYPE_ADHOC) continue; sdata->u.ibss.last_scan_completed = jiffies; } } int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { u64 changed = 0; enum ieee80211_chanctx_mode chanmode; struct ieee80211_local *local = sdata->local; int radar_detect_width = 0; int ret; lockdep_assert_wiphy(local->hw.wiphy); if (params->chandef.chan->freq_offset) { /* this may work, but is untested */ return -EOPNOTSUPP; } ret = cfg80211_chandef_dfs_required(local->hw.wiphy, ¶ms->chandef, sdata->wdev.iftype); if (ret < 0) return ret; if (ret > 0) { if (!params->userspace_handles_dfs) return -EINVAL; radar_detect_width = BIT(params->chandef.width); } chanmode = (params->channel_fixed && !ret) ? 
IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE; ret = ieee80211_check_combinations(sdata, ¶ms->chandef, chanmode, radar_detect_width, -1); if (ret < 0) return ret; if (params->bssid) { memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN); sdata->u.ibss.fixed_bssid = true; } else sdata->u.ibss.fixed_bssid = false; sdata->u.ibss.privacy = params->privacy; sdata->u.ibss.control_port = params->control_port; sdata->u.ibss.userspace_handles_dfs = params->userspace_handles_dfs; sdata->u.ibss.basic_rates = params->basic_rates; sdata->u.ibss.last_scan_completed = jiffies; /* fix basic_rates if channel does not support these rates */ memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, sizeof(params->mcast_rate)); sdata->vif.bss_conf.beacon_int = params->beacon_interval; sdata->u.ibss.chandef = params->chandef; sdata->u.ibss.fixed_channel = params->channel_fixed; if (params->ie) { sdata->u.ibss.ie = kmemdup(params->ie, params->ie_len, GFP_KERNEL); if (sdata->u.ibss.ie) sdata->u.ibss.ie_len = params->ie_len; } sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; sdata->u.ibss.ibss_join_req = jiffies; memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len); sdata->u.ibss.ssid_len = params->ssid_len; memcpy(&sdata->u.ibss.ht_capa, ¶ms->ht_capa, sizeof(sdata->u.ibss.ht_capa)); memcpy(&sdata->u.ibss.ht_capa_mask, ¶ms->ht_capa_mask, sizeof(sdata->u.ibss.ht_capa_mask)); /* * 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is * reserved, but an HT STA shall protect HT transmissions as though * the HT Protection field were set to non-HT mixed mode. * * In an IBSS, the RIFS Mode field of the HT Operation element is * also reserved, but an HT STA shall operate as though this field * were set to 1. */ sdata->vif.bss_conf.ht_operation_mode |= IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED | IEEE80211_HT_PARAM_RIFS_MODE; changed |= BSS_CHANGED_HT | BSS_CHANGED_MCAST_RATE; ieee80211_link_info_change_notify(sdata, &sdata->deflink, changed); sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; sdata->deflink.needed_rx_chains = local->rx_chains; sdata->control_port_over_nl80211 = params->control_port_over_nl80211; wiphy_work_queue(local->hw.wiphy, &sdata->work); return 0; } int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; ifibss->ssid_len = 0; ieee80211_ibss_disconnect(sdata); eth_zero_addr(ifibss->bssid); /* remove beacon */ kfree(sdata->u.ibss.ie); sdata->u.ibss.ie = NULL; sdata->u.ibss.ie_len = 0; /* on the next join, re-program HT parameters */ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa)); memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask)); synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); timer_delete_sync(&sdata->u.ibss.timer); return 0; } |
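For reference, the BSSID derivation in ieee80211_sta_create_ibss() above can be reproduced in isolation. A minimal userspace sketch follows; the fill_random() helper is invented here as a stand-in for get_random_bytes(), and the point is only to show why the first octet is masked.

/*
 * Sketch only, not mac80211 code: derive a random, locally administered,
 * non-multicast BSSID mixed with the device's own MAC address, mirroring
 * the logic in ieee80211_sta_create_ibss().  fill_random() is a
 * hypothetical replacement for the kernel's get_random_bytes().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static void fill_random(uint8_t *buf, size_t len)	/* stand-in for get_random_bytes() */
{
	for (size_t i = 0; i < len; i++)
		buf[i] = (uint8_t)rand();
}

static void ibss_make_bssid(uint8_t bssid[ETH_ALEN], const uint8_t own_addr[ETH_ALEN])
{
	fill_random(bssid, ETH_ALEN);
	for (int i = 0; i < ETH_ALEN; i++)
		bssid[i] ^= own_addr[i];	/* mix in own MAC for uniqueness */
	bssid[0] &= ~0x01;			/* clear the multicast/group bit */
	bssid[0] |= 0x02;			/* set the locally administered bit */
}

int main(void)
{
	const uint8_t own[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t bssid[ETH_ALEN];

	ibss_make_bssid(bssid, own);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
	return 0;
}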
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 | // SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/befs/linuxvfs.c * * Copyright (C) 2001 Will Dyson <will_dyson@pobox.com * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/errno.h> #include <linux/stat.h> #include <linux/nls.h> #include <linux/buffer_head.h> #include <linux/vfs.h> #include <linux/namei.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/exportfs.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include "befs.h" #include "btree.h" #include "inode.h" #include "datastream.h" #include "super.h" #include "io.h" MODULE_DESCRIPTION("BeOS File System (BeFS) driver"); MODULE_AUTHOR("Will Dyson"); MODULE_LICENSE("GPL"); /* The units the vfs expects inode->i_blocks to be in */ #define VFS_BLOCK_SIZE 512 static int befs_readdir(struct file *, struct dir_context *); static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int); static int befs_read_folio(struct file *file, struct folio *folio); static sector_t befs_bmap(struct address_space *mapping, sector_t block); static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int); static struct inode *befs_iget(struct super_block *, unsigned long); static struct inode *befs_alloc_inode(struct super_block *sb); static void befs_free_inode(struct inode *inode); static void befs_destroy_inodecache(void); static int befs_symlink_read_folio(struct file *, struct folio *); static int befs_utf2nls(struct super_block *sb, const char *in, int in_len, char **out, int *out_len); static int befs_nls2utf(struct super_block *sb, const char *in, int in_len, char **out, int *out_len); static void befs_put_super(struct super_block *); static int befs_statfs(struct dentry *, struct kstatfs *); static int befs_show_options(struct seq_file *, struct dentry *); static struct dentry *befs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); static struct dentry *befs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type); static struct dentry *befs_get_parent(struct dentry *child); static void befs_free_fc(struct fs_context *fc); static const struct super_operations befs_sops = { .alloc_inode = befs_alloc_inode, /* allocate a new inode */ .free_inode = befs_free_inode, /* deallocate an inode */ .put_super = befs_put_super, /* uninit super */ .statfs = befs_statfs, /* statfs */ .show_options = befs_show_options, }; /* slab cache for befs_inode_info objects */ static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, .iterate_shared = befs_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations befs_dir_inode_operations = { .lookup = befs_lookup, }; static const struct address_space_operations befs_aops = { .read_folio = befs_read_folio, .bmap = befs_bmap, }; static const struct address_space_operations 
befs_symlink_aops = { .read_folio = befs_symlink_read_folio, }; static const struct export_operations befs_export_operations = { .encode_fh = generic_encode_ino32_fh, .fh_to_dentry = befs_fh_to_dentry, .fh_to_parent = befs_fh_to_parent, .get_parent = befs_get_parent, }; /* * Called by generic_file_read() to read a folio of data * * In turn, simply calls a generic block read function and * passes it the address of befs_get_block, for mapping file * positions to disk blocks. */ static int befs_read_folio(struct file *file, struct folio *folio) { return block_read_full_folio(folio, befs_get_block); } static sector_t befs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, befs_get_block); } /* * Generic function to map a file position (block) to a * disk offset (passed back in bh_result). * * Used by many higher level functions. * * Calls befs_fblock2brun() in datastream.c to do the real work. */ static int befs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create) { struct super_block *sb = inode->i_sb; befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_block_run run = BAD_IADDR; int res; ulong disk_off; befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld", (unsigned long)inode->i_ino, (long)block); if (create) { befs_error(sb, "befs_get_block() was asked to write to " "block %ld in inode %lu", (long)block, (unsigned long)inode->i_ino); return -EPERM; } res = befs_fblock2brun(sb, ds, block, &run); if (res != BEFS_OK) { befs_error(sb, "<--- %s for inode %lu, block %ld ERROR", __func__, (unsigned long)inode->i_ino, (long)block); return -EFBIG; } disk_off = (ulong) iaddr2blockno(sb, &run); map_bh(bh_result, inode->i_sb, disk_off); befs_debug(sb, "<--- %s for inode %lu, block %ld, disk address %lu", __func__, (unsigned long)inode->i_ino, (long)block, (unsigned long)disk_off); return 0; } static struct dentry * befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode; struct super_block *sb = dir->i_sb; const befs_data_stream *ds = &BEFS_I(dir)->i_data.ds; befs_off_t offset; int ret; int utfnamelen; char *utfname; const char *name = dentry->d_name.name; befs_debug(sb, "---> %s name %pd inode %ld", __func__, dentry, dir->i_ino); /* Convert to UTF-8 */ if (BEFS_SB(sb)->nls) { ret = befs_nls2utf(sb, name, strlen(name), &utfname, &utfnamelen); if (ret < 0) { befs_debug(sb, "<--- %s ERROR", __func__); return ERR_PTR(ret); } ret = befs_btree_find(sb, ds, utfname, &offset); kfree(utfname); } else { ret = befs_btree_find(sb, ds, name, &offset); } if (ret == BEFS_BT_NOT_FOUND) { befs_debug(sb, "<--- %s %pd not found", __func__, dentry); inode = NULL; } else if (ret != BEFS_OK || offset == 0) { befs_error(sb, "<--- %s Error", __func__); inode = ERR_PTR(-ENODATA); } else { inode = befs_iget(dir->i_sb, (ino_t) offset); } befs_debug(sb, "<--- %s", __func__); return d_splice_alias(inode, dentry); } static int befs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; const befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_off_t value; int result; size_t keysize; char keybuf[BEFS_NAME_LEN + 1]; befs_debug(sb, "---> %s name %pD, inode %ld, ctx->pos %lld", __func__, file, inode->i_ino, ctx->pos); while (1) { result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1, keybuf, &keysize, &value); if (result == BEFS_ERR) { befs_debug(sb, "<--- %s ERROR", __func__); befs_error(sb, "IO error reading %pD 
(inode %lu)", file, inode->i_ino); return -EIO; } else if (result == BEFS_BT_END) { befs_debug(sb, "<--- %s END", __func__); return 0; } else if (result == BEFS_BT_EMPTY) { befs_debug(sb, "<--- %s Empty directory", __func__); return 0; } /* Convert to NLS */ if (BEFS_SB(sb)->nls) { char *nlsname; int nlsnamelen; result = befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen); if (result < 0) { befs_debug(sb, "<--- %s ERROR", __func__); return result; } if (!dir_emit(ctx, nlsname, nlsnamelen, (ino_t) value, DT_UNKNOWN)) { kfree(nlsname); return 0; } kfree(nlsname); } else { if (!dir_emit(ctx, keybuf, keysize, (ino_t) value, DT_UNKNOWN)) return 0; } ctx->pos++; } } static struct inode * befs_alloc_inode(struct super_block *sb) { struct befs_inode_info *bi; bi = alloc_inode_sb(sb, befs_inode_cachep, GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; } static void befs_free_inode(struct inode *inode) { kmem_cache_free(befs_inode_cachep, BEFS_I(inode)); } static void init_once(void *foo) { struct befs_inode_info *bi = (struct befs_inode_info *) foo; inode_init_once(&bi->vfs_inode); } static struct inode *befs_iget(struct super_block *sb, unsigned long ino) { struct buffer_head *bh; befs_inode *raw_inode; struct befs_sb_info *befs_sb = BEFS_SB(sb); struct befs_inode_info *befs_ino; struct inode *inode; befs_debug(sb, "---> %s inode = %lu", __func__, ino); inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; befs_ino = BEFS_I(inode); /* convert from vfs's inode number to befs's inode number */ befs_ino->i_inode_num = blockno2iaddr(sb, inode->i_ino); befs_debug(sb, " real inode number [%u, %hu, %hu]", befs_ino->i_inode_num.allocation_group, befs_ino->i_inode_num.start, befs_ino->i_inode_num.len); bh = sb_bread(sb, inode->i_ino); if (!bh) { befs_error(sb, "unable to read inode block - " "inode = %lu", inode->i_ino); goto unacquire_none; } raw_inode = (befs_inode *) bh->b_data; befs_dump_inode(sb, raw_inode); if (befs_check_inode(sb, raw_inode, inode->i_ino) != BEFS_OK) { befs_error(sb, "Bad inode: %lu", inode->i_ino); goto unacquire_bh; } inode->i_mode = (umode_t) fs32_to_cpu(sb, raw_inode->mode); /* * set uid and gid. But since current BeOS is single user OS, so * you can change by "uid" or "gid" options. */ inode->i_uid = befs_sb->mount_opts.use_uid ? befs_sb->mount_opts.uid : make_kuid(&init_user_ns, fs32_to_cpu(sb, raw_inode->uid)); inode->i_gid = befs_sb->mount_opts.use_gid ? befs_sb->mount_opts.gid : make_kgid(&init_user_ns, fs32_to_cpu(sb, raw_inode->gid)); set_nlink(inode, 1); /* * BEFS's time is 64 bits, but current VFS is 32 bits... * BEFS don't have access time. Nor inode change time. VFS * doesn't have creation time. * Also, the lower 16 bits of the last_modified_time and * create_time are just a counter to help ensure uniqueness * for indexing purposes. 
(PFD, page 54) */ inode_set_mtime(inode, fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16, 0);/* lower 16 bits are not a time */ inode_set_ctime_to_ts(inode, inode_get_mtime(inode)); inode_set_atime_to_ts(inode, inode_get_mtime(inode)); befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num); befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent); befs_ino->i_attribute = fsrun_to_cpu(sb, raw_inode->attributes); befs_ino->i_flags = fs32_to_cpu(sb, raw_inode->flags); if (S_ISLNK(inode->i_mode) && !(befs_ino->i_flags & BEFS_LONG_SYMLINK)){ inode->i_size = 0; inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE; strscpy(befs_ino->i_data.symlink, raw_inode->data.symlink, BEFS_SYMLINK_LEN); } else { int num_blks; befs_ino->i_data.ds = fsds_to_cpu(sb, &raw_inode->data.datastream); num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds); inode->i_blocks = num_blks * (befs_sb->block_size / VFS_BLOCK_SIZE); inode->i_size = befs_ino->i_data.ds.size; } inode->i_mapping->a_ops = &befs_aops; if (S_ISREG(inode->i_mode)) { inode->i_fop = &generic_ro_fops; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &befs_dir_inode_operations; inode->i_fop = &befs_dir_operations; } else if (S_ISLNK(inode->i_mode)) { if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); inode->i_mapping->a_ops = &befs_symlink_aops; } else { inode->i_link = befs_ino->i_data.symlink; inode->i_op = &simple_symlink_inode_operations; } } else { befs_error(sb, "Inode %lu is not a regular file, " "directory or symlink. THAT IS WRONG! BeFS has no " "on disk special files", inode->i_ino); goto unacquire_bh; } brelse(bh); befs_debug(sb, "<--- %s", __func__); unlock_new_inode(inode); return inode; unacquire_bh: brelse(bh); unacquire_none: iget_failed(inode); befs_debug(sb, "<--- %s - Bad inode", __func__); return ERR_PTR(-EIO); } /* Initialize the inode cache. Called at fs setup. * * Taken from NFS implementation by Al Viro. */ static int __init befs_init_inodecache(void) { befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache", sizeof(struct befs_inode_info), 0, SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, offsetof(struct befs_inode_info, i_data.symlink), sizeof_field(struct befs_inode_info, i_data.symlink), init_once); if (befs_inode_cachep == NULL) return -ENOMEM; return 0; } /* Called at fs teardown. * * Taken from NFS implementation by Al Viro. */ static void befs_destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(befs_inode_cachep); } /* * The inode of symbolic link is different to data stream. * The data stream become link name. Unless the LONG_SYMLINK * flag is set. 
*/ static int befs_symlink_read_folio(struct file *unused, struct folio *folio) { struct inode *inode = folio->mapping->host; struct super_block *sb = inode->i_sb; struct befs_inode_info *befs_ino = BEFS_I(inode); befs_data_stream *data = &befs_ino->i_data.ds; befs_off_t len = data->size; char *link = folio_address(folio); int err = -EIO; if (len == 0 || len > PAGE_SIZE) { befs_error(sb, "Long symlink with illegal length"); goto fail; } befs_debug(sb, "Follow long symlink"); if (befs_read_lsymlink(sb, data, link, len) != len) { befs_error(sb, "Failed to read entire long symlink"); goto fail; } link[len - 1] = '\0'; err = 0; fail: folio_end_read(folio, err == 0); return err; } /* * UTF-8 to NLS charset convert routine * * Uses uni2char() / char2uni() rather than the nls tables directly */ static int befs_utf2nls(struct super_block *sb, const char *in, int in_len, char **out, int *out_len) { struct nls_table *nls = BEFS_SB(sb)->nls; int i, o; unicode_t uni; int unilen, utflen; char *result; /* The utf8->nls conversion won't make the final nls string bigger * than the utf one, but if the string is pure ascii they'll have the * same width and an extra char is needed to save the additional \0 */ int maxlen = in_len + 1; befs_debug(sb, "---> %s", __func__); if (!nls) { befs_error(sb, "%s called with no NLS table loaded", __func__); return -EINVAL; } *out = result = kmalloc(maxlen, GFP_NOFS); if (!*out) return -ENOMEM; for (i = o = 0; i < in_len; i += utflen, o += unilen) { /* convert from UTF-8 to Unicode */ utflen = utf8_to_utf32(&in[i], in_len - i, &uni); if (utflen < 0) goto conv_err; /* convert from Unicode to nls */ if (uni > MAX_WCHAR_T) goto conv_err; unilen = nls->uni2char(uni, &result[o], in_len - o); if (unilen < 0) goto conv_err; } result[o] = '\0'; *out_len = o; befs_debug(sb, "<--- %s", __func__); return o; conv_err: befs_error(sb, "Name using character set %s contains a character that " "cannot be converted to unicode.", nls->charset); befs_debug(sb, "<--- %s", __func__); kfree(result); return -EILSEQ; } /** * befs_nls2utf - Convert NLS string to utf8 encodeing * @sb: Superblock * @in: Input string buffer in NLS format * @in_len: Length of input string in bytes * @out: The output string in UTF-8 format * @out_len: Length of the output buffer * * Converts input string @in, which is in the format of the loaded NLS map, * into a utf8 string. * * The destination string @out is allocated by this function and the caller is * responsible for freeing it with kfree() * * On return, *@out_len is the length of @out in bytes. * * On success, the return value is the number of utf8 characters written to * the output buffer @out. * * On Failure, a negative number coresponding to the error code is returned. 
*/ static int befs_nls2utf(struct super_block *sb, const char *in, int in_len, char **out, int *out_len) { struct nls_table *nls = BEFS_SB(sb)->nls; int i, o; wchar_t uni; int unilen, utflen; char *result; /* * There are nls characters that will translate to 3-chars-wide UTF-8 * characters, an additional byte is needed to save the final \0 * in special cases */ int maxlen = (3 * in_len) + 1; befs_debug(sb, "---> %s\n", __func__); if (!nls) { befs_error(sb, "%s called with no NLS table loaded.", __func__); return -EINVAL; } *out = result = kmalloc(maxlen, GFP_NOFS); if (!*out) { *out_len = 0; return -ENOMEM; } for (i = o = 0; i < in_len; i += unilen, o += utflen) { /* convert from nls to unicode */ unilen = nls->char2uni(&in[i], in_len - i, &uni); if (unilen < 0) goto conv_err; /* convert from unicode to UTF-8 */ utflen = utf32_to_utf8(uni, &result[o], 3); if (utflen <= 0) goto conv_err; } result[o] = '\0'; *out_len = o; befs_debug(sb, "<--- %s", __func__); return i; conv_err: befs_error(sb, "Name using character set %s contains a character that " "cannot be converted to unicode.", nls->charset); befs_debug(sb, "<--- %s", __func__); kfree(result); return -EILSEQ; } static struct inode *befs_nfs_get_inode(struct super_block *sb, uint64_t ino, uint32_t generation) { /* No need to handle i_generation */ return befs_iget(sb, ino); } /* * Map a NFS file handle to a corresponding dentry */ static struct dentry *befs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, befs_nfs_get_inode); } /* * Find the parent for a file specified by NFS handle */ static struct dentry *befs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, befs_nfs_get_inode); } static struct dentry *befs_get_parent(struct dentry *child) { struct inode *parent; struct befs_inode_info *befs_ino = BEFS_I(d_inode(child)); parent = befs_iget(child->d_sb, (unsigned long)befs_ino->i_parent.start); return d_obtain_alias(parent); } enum { Opt_uid, Opt_gid, Opt_charset, Opt_debug, }; static const struct fs_parameter_spec befs_param_spec[] = { fsparam_uid ("uid", Opt_uid), fsparam_gid ("gid", Opt_gid), fsparam_string ("iocharset", Opt_charset), fsparam_flag ("debug", Opt_debug), {} }; static int befs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct befs_mount_options *opts = fc->fs_private; int token; struct fs_parse_result result; /* befs ignores all options on remount */ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) return 0; token = fs_parse(fc, befs_param_spec, param, &result); if (token < 0) return token; switch (token) { case Opt_uid: opts->uid = result.uid; opts->use_uid = 1; break; case Opt_gid: opts->gid = result.gid; opts->use_gid = 1; break; case Opt_charset: kfree(opts->iocharset); opts->iocharset = param->string; param->string = NULL; break; case Opt_debug: opts->debug = 1; break; default: return -EINVAL; } return 0; } static int befs_show_options(struct seq_file *m, struct dentry *root) { struct befs_sb_info *befs_sb = BEFS_SB(root->d_sb); struct befs_mount_options *opts = &befs_sb->mount_opts; if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, opts->uid)); if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); if (opts->iocharset) seq_printf(m, ",charset=%s", opts->iocharset); if (opts->debug) seq_puts(m, ",debug"); return 0; } /* This 
function has the responsibiltiy of getting the * filesystem ready for unmounting. * Basically, we free everything that we allocated in * befs_read_inode */ static void befs_put_super(struct super_block *sb) { kfree(BEFS_SB(sb)->mount_opts.iocharset); BEFS_SB(sb)->mount_opts.iocharset = NULL; unload_nls(BEFS_SB(sb)->nls); kfree(sb->s_fs_info); sb->s_fs_info = NULL; } /* * Copy the parsed options into the sbi mount_options member */ static void befs_set_options(struct befs_sb_info *sbi, struct befs_mount_options *opts) { sbi->mount_opts.uid = opts->uid; sbi->mount_opts.gid = opts->gid; sbi->mount_opts.use_uid = opts->use_uid; sbi->mount_opts.use_gid = opts->use_gid; sbi->mount_opts.debug = opts->debug; sbi->mount_opts.iocharset = opts->iocharset; opts->iocharset = NULL; } /* Allocate private field of the superblock, fill it. * * Finish filling the public superblock fields * Make the root directory * Load a set of NLS translations if needed. */ static int befs_fill_super(struct super_block *sb, struct fs_context *fc) { struct buffer_head *bh; struct befs_sb_info *befs_sb; befs_super_block *disk_sb; struct inode *root; long ret = -EINVAL; const unsigned long sb_block = 0; const off_t x86_sb_off = 512; int blocksize; struct befs_mount_options *parsed_opts = fc->fs_private; int silent = fc->sb_flags & SB_SILENT; sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL); if (sb->s_fs_info == NULL) goto unacquire_none; befs_sb = BEFS_SB(sb); befs_set_options(befs_sb, parsed_opts); befs_debug(sb, "---> %s", __func__); if (!sb_rdonly(sb)) { befs_warning(sb, "No write support. Marking filesystem read-only"); sb->s_flags |= SB_RDONLY; } /* * Set dummy blocksize to read super block. * Will be set to real fs blocksize later. * * Linux 2.4.10 and later refuse to read blocks smaller than * the logical block size for the device. But we also need to read at * least 1k to get the second 512 bytes of the volume. 
*/ blocksize = sb_min_blocksize(sb, 1024); if (!blocksize) { if (!silent) befs_error(sb, "unable to set blocksize"); goto unacquire_priv_sbp; } bh = sb_bread(sb, sb_block); if (!bh) { if (!silent) befs_error(sb, "unable to read superblock"); goto unacquire_priv_sbp; } /* account for offset of super block on x86 */ disk_sb = (befs_super_block *) bh->b_data; if ((disk_sb->magic1 == BEFS_SUPER_MAGIC1_LE) || (disk_sb->magic1 == BEFS_SUPER_MAGIC1_BE)) { befs_debug(sb, "Using PPC superblock location"); } else { befs_debug(sb, "Using x86 superblock location"); disk_sb = (befs_super_block *) ((void *) bh->b_data + x86_sb_off); } if ((befs_load_sb(sb, disk_sb) != BEFS_OK) || (befs_check_sb(sb) != BEFS_OK)) goto unacquire_bh; befs_dump_super_block(sb, disk_sb); brelse(bh); if (befs_sb->num_blocks > ~((sector_t)0)) { if (!silent) befs_error(sb, "blocks count: %llu is larger than the host can use", befs_sb->num_blocks); goto unacquire_priv_sbp; } /* * set up enough so that it can read an inode * Fill in kernel superblock fields from private sb */ sb->s_magic = BEFS_SUPER_MAGIC; /* Set real blocksize of fs */ sb_set_blocksize(sb, (ulong) befs_sb->block_size); sb->s_op = &befs_sops; sb->s_export_op = &befs_export_operations; sb->s_time_min = 0; sb->s_time_max = 0xffffffffffffll; root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir))); if (IS_ERR(root)) { ret = PTR_ERR(root); goto unacquire_priv_sbp; } sb->s_root = d_make_root(root); if (!sb->s_root) { if (!silent) befs_error(sb, "get root inode failed"); goto unacquire_priv_sbp; } /* load nls library */ if (befs_sb->mount_opts.iocharset) { befs_debug(sb, "Loading nls: %s", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls(befs_sb->mount_opts.iocharset); if (!befs_sb->nls) { befs_warning(sb, "Cannot load nls %s" " loading default nls", befs_sb->mount_opts.iocharset); befs_sb->nls = load_nls_default(); } /* load default nls if none is specified in mount options */ } else { befs_debug(sb, "Loading default nls"); befs_sb->nls = load_nls_default(); } return 0; unacquire_bh: brelse(bh); unacquire_priv_sbp: kfree(befs_sb->mount_opts.iocharset); kfree(sb->s_fs_info); sb->s_fs_info = NULL; unacquire_none: return ret; } static int befs_reconfigure(struct fs_context *fc) { sync_filesystem(fc->root->d_sb); if (!(fc->sb_flags & SB_RDONLY)) return -EINVAL; return 0; } static int befs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; u64 id = huge_encode_dev(sb->s_bdev->bd_dev); befs_debug(sb, "---> %s", __func__); buf->f_type = BEFS_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = BEFS_SB(sb)->num_blocks; buf->f_bfree = BEFS_SB(sb)->num_blocks - BEFS_SB(sb)->used_blocks; buf->f_bavail = buf->f_bfree; buf->f_files = 0; /* UNKNOWN */ buf->f_ffree = 0; /* UNKNOWN */ buf->f_fsid = u64_to_fsid(id); buf->f_namelen = BEFS_NAME_LEN; befs_debug(sb, "<--- %s", __func__); return 0; } static int befs_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, befs_fill_super); } static const struct fs_context_operations befs_context_ops = { .parse_param = befs_parse_param, .get_tree = befs_get_tree, .reconfigure = befs_reconfigure, .free = befs_free_fc, }; static int befs_init_fs_context(struct fs_context *fc) { struct befs_mount_options *opts; opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return -ENOMEM; /* Initialize options */ opts->uid = GLOBAL_ROOT_UID; opts->gid = GLOBAL_ROOT_GID; fc->fs_private = opts; fc->ops = &befs_context_ops; return 0; } static void befs_free_fc(struct fs_context *fc) { struct 
befs_mount_options *opts = fc->fs_private; kfree(opts->iocharset); kfree(fc->fs_private); } static struct file_system_type befs_fs_type = { .owner = THIS_MODULE, .name = "befs", .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, .init_fs_context = befs_init_fs_context, .parameters = befs_param_spec, }; MODULE_ALIAS_FS("befs"); static int __init init_befs_fs(void) { int err; pr_info("version: %s\n", BEFS_VERSION); err = befs_init_inodecache(); if (err) goto unacquire_none; err = register_filesystem(&befs_fs_type); if (err) goto unacquire_inodecache; return 0; unacquire_inodecache: befs_destroy_inodecache(); unacquire_none: return err; } static void __exit exit_befs_fs(void) { befs_destroy_inodecache(); unregister_filesystem(&befs_fs_type); } /* * Macros that typecheck the init and exit functions, * ensures that they are called at init and cleanup, * and eliminates warnings about unused functions. */ module_init(init_befs_fs) module_exit(exit_befs_fs) |
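A quick arithmetic sketch of the i_blocks conversion done in befs_iget() above: BeFS counts blocks in its own filesystem block size, while the VFS expects 512-byte units (VFS_BLOCK_SIZE). The 2 KiB block size and block count below are assumed values, not taken from a real volume.

/*
 * Standalone sketch with made-up numbers: convert a BeFS block count to
 * the 512-byte units the VFS expects, mirroring the i_blocks computation
 * in befs_iget().
 */
#include <stdio.h>

#define VFS_BLOCK_SIZE 512

int main(void)
{
	unsigned long fs_block_size = 2048;	/* hypothetical BeFS block size */
	unsigned long num_blks = 10;		/* blocks used by the data stream */
	unsigned long i_blocks;

	i_blocks = num_blks * (fs_block_size / VFS_BLOCK_SIZE);
	printf("i_blocks = %lu (512-byte units)\n", i_blocks);	/* prints 40 */
	return 0;
}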
| 1 2 1 67 67 67 1 1 13 51 14 1 28 37 4 1 49 2 4 19 41 58 1 1 59 66 67 10 10 36 3 5 17 39 68 3 67 66 66 65 3 67 19 7 12 17 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ #include "xattr.h" #include <trace/events/erofs.h> static int erofs_fill_symlink(struct inode *inode, void *kaddr, unsigned int m_pofs) { struct erofs_inode *vi = EROFS_I(inode); loff_t off; m_pofs += vi->xattr_isize; /* check if it cannot be handled with fast symlink scheme */ if (vi->datalayout != EROFS_INODE_FLAT_INLINE || check_add_overflow(m_pofs, inode->i_size, &off) || off > i_blocksize(inode)) return 0; inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL); return inode->i_link ? 
0 : -ENOMEM; } static int erofs_read_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; erofs_blk_t blkaddr = erofs_blknr(sb, erofs_iloc(inode)); unsigned int ofs = erofs_blkoff(sb, erofs_iloc(inode)); bool in_mbox = erofs_inode_in_metabox(inode); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_sb_info *sbi = EROFS_SB(sb); erofs_blk_t addrmask = BIT_ULL(48) - 1; struct erofs_inode *vi = EROFS_I(inode); struct erofs_inode_extended *die, copied; struct erofs_inode_compact *dic; unsigned int ifmt; void *ptr; int err = 0; ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), in_mbox); if (IS_ERR(ptr)) { err = PTR_ERR(ptr); erofs_err(sb, "failed to read inode meta block (nid: %llu): %d", vi->nid, err); goto err_out; } dic = ptr + ofs; ifmt = le16_to_cpu(dic->i_format); if (ifmt & ~EROFS_I_ALL) { erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = -EOPNOTSUPP; goto err_out; } vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; } switch (erofs_inode_version(ifmt)) { case EROFS_INODE_LAYOUT_EXTENDED: vi->inode_isize = sizeof(struct erofs_inode_extended); /* check if the extended inode acrosses block boundary */ if (ofs + vi->inode_isize <= sb->s_blocksize) { ofs += vi->inode_isize; die = (struct erofs_inode_extended *)dic; copied.i_u = die->i_u; copied.i_nb = die->i_nb; } else { const unsigned int gotten = sb->s_blocksize - ofs; memcpy(&copied, dic, gotten); ptr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1), in_mbox); if (IS_ERR(ptr)) { err = PTR_ERR(ptr); erofs_err(sb, "failed to read inode payload block (nid: %llu): %d", vi->nid, err); goto err_out; } ofs = vi->inode_isize - gotten; memcpy((u8 *)&copied + gotten, ptr, ofs); die = &copied; } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); inode_set_mtime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); break; case EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); copied.i_u = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); if (!S_ISDIR(inode->i_mode) && ((ifmt >> EROFS_I_NLINK_1_BIT) & 1)) { set_nlink(inode, 1); copied.i_nb = dic->i_nb; } else { set_nlink(inode, le16_to_cpu(dic->i_nb.nlink)); copied.i_nb.startblk_hi = 0; addrmask = BIT_ULL(32) - 1; } inode_set_mtime(inode, sbi->epoch + le32_to_cpu(dic->i_mtime), sbi->fixed_nsec); inode->i_size = le32_to_cpu(dic->i_size); break; default: erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } if (unlikely(inode->i_size < 0)) { erofs_err(sb, "negative i_size @ nid %llu", vi->nid); err = -EFSCORRUPTED; goto err_out; } switch (inode->i_mode & S_IFMT) { case S_IFDIR: vi->dot_omitted = (ifmt >> EROFS_I_DOT_OMITTED_BIT) & 1; fallthrough; case S_IFREG: case S_IFLNK: vi->startblk = le32_to_cpu(copied.i_u.startblk_lo) | ((u64)le16_to_cpu(copied.i_nb.startblk_hi) << 32); if (vi->datalayout == EROFS_INODE_FLAT_PLAIN && !((vi->startblk ^ 
EROFS_NULL_ADDR) & addrmask)) vi->startblk = EROFS_NULL_ADDR; if(S_ISLNK(inode->i_mode)) { err = erofs_fill_symlink(inode, ptr, ofs); if (err) goto err_out; } break; case S_IFCHR: case S_IFBLK: inode->i_rdev = new_decode_dev(le32_to_cpu(copied.i_u.rdev)); break; case S_IFIFO: case S_IFSOCK: inode->i_rdev = 0; break; default: erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); err = -EFSCORRUPTED; goto err_out; } if (erofs_inode_is_data_compressed(vi->datalayout)) inode->i_blocks = le32_to_cpu(copied.i_u.blocks_lo) << (sb->s_blocksize_bits - 9); else inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9; if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { /* fill chunked inode summary info */ vi->chunkformat = le16_to_cpu(copied.i_u.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; } vi->chunkbits = sb->s_blocksize_bits + (vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK); } inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, inode_get_mtime(inode))); inode->i_flags &= ~S_DAX; if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) && (vi->datalayout == EROFS_INODE_FLAT_PLAIN || vi->datalayout == EROFS_INODE_CHUNK_BASED)) inode->i_flags |= S_DAX; err_out: erofs_put_metabuf(&buf); return err; } static int erofs_fill_inode(struct inode *inode) { struct erofs_inode *vi = EROFS_I(inode); int err; trace_erofs_fill_inode(inode); err = erofs_read_inode(inode); if (err) return err; switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &erofs_generic_iops; if (erofs_inode_is_data_compressed(vi->datalayout)) inode->i_fop = &generic_ro_fops; else inode->i_fop = &erofs_file_fops; break; case S_IFDIR: inode->i_op = &erofs_dir_iops; inode->i_fop = &erofs_dir_fops; inode_nohighmem(inode); break; case S_IFLNK: if (inode->i_link) inode->i_op = &erofs_fast_symlink_iops; else inode->i_op = &erofs_symlink_iops; inode_nohighmem(inode); break; default: inode->i_op = &erofs_generic_iops; init_special_inode(inode, inode->i_mode, inode->i_rdev); return 0; } mapping_set_large_folios(inode->i_mapping); if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, erofs_info, inode->i_sb, "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); inode->i_mapping->a_ops = &z_erofs_aops; #else err = -EOPNOTSUPP; #endif } else { inode->i_mapping->a_ops = &erofs_aops; #ifdef CONFIG_EROFS_FS_ONDEMAND if (erofs_is_fscache_mode(inode->i_sb)) inode->i_mapping->a_ops = &erofs_fscache_access_aops; #endif #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) inode->i_mapping->a_ops = &erofs_fileio_aops; #endif } return err; } /* * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down * so that it will fit. 
*/ static ino_t erofs_squash_ino(struct super_block *sb, erofs_nid_t nid) { u64 ino64 = erofs_nid_to_ino64(EROFS_SB(sb), nid); if (sizeof(ino_t) < sizeof(erofs_nid_t)) ino64 ^= ino64 >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8; return (ino_t)ino64; } static int erofs_iget5_eq(struct inode *inode, void *opaque) { return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque; } static int erofs_iget5_set(struct inode *inode, void *opaque) { const erofs_nid_t nid = *(erofs_nid_t *)opaque; inode->i_ino = erofs_squash_ino(inode->i_sb, nid); EROFS_I(inode)->nid = nid; return 0; } struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid) { struct inode *inode; inode = iget5_locked(sb, erofs_squash_ino(sb, nid), erofs_iget5_eq, erofs_iget5_set, &nid); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { int err = erofs_fill_inode(inode); if (err) { iget_failed(inode); return ERR_PTR(err); } unlock_new_inode(inode); } return inode; } int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *const inode = d_inode(path->dentry); struct block_device *bdev = inode->i_sb->s_bdev; bool compressed = erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout); if (compressed) stat->attributes |= STATX_ATTR_COMPRESSED; stat->attributes |= STATX_ATTR_IMMUTABLE; stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_IMMUTABLE); /* * Return the DIO alignment restrictions if requested. * * In EROFS, STATX_DIOALIGN is only supported in bdev-based mode * and uncompressed inodes, otherwise we report no DIO support. */ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { stat->result_mask |= STATX_DIOALIGN; if (bdev && !compressed) { stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; stat->dio_offset_align = bdev_logical_block_size(bdev); } } generic_fillattr(idmap, request_mask, inode, stat); return 0; } const struct inode_operations erofs_generic_iops = { .getattr = erofs_getattr, .listxattr = erofs_listxattr, .get_inode_acl = erofs_get_acl, .fiemap = erofs_fiemap, }; const struct inode_operations erofs_symlink_iops = { .get_link = page_get_link, .getattr = erofs_getattr, .listxattr = erofs_listxattr, .get_inode_acl = erofs_get_acl, }; const struct inode_operations erofs_fast_symlink_iops = { .get_link = simple_get_link, .getattr = erofs_getattr, .listxattr = erofs_listxattr, .get_inode_acl = erofs_get_acl, }; |
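The XOR fold used by erofs_squash_ino() above is easy to check in isolation. A small sketch with fixed-width types follows; the example value is made up, and the 32-bit shift corresponds to the case where ino_t is 4 bytes and erofs_nid_t is 8.

/*
 * Illustrative sketch: fold a 64-bit value into 32 bits the same way
 * erofs_squash_ino() does when sizeof(ino_t) < sizeof(erofs_nid_t).
 * The input value below is invented.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t squash_ino64(uint64_t ino64)
{
	ino64 ^= ino64 >> 32;	/* fold the high half into the low half */
	return (uint32_t)ino64;
}

int main(void)
{
	uint64_t nid = 0x0000000500000001ULL;	/* hypothetical nid-derived value */

	printf("squashed ino = %" PRIu32 "\n", squash_ino64(nid));
	return 0;
}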
| 519 396 368 289 164 281 171 239 171 239 514 332 516 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000,2005 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_inode_item.h" #include <linux/iversion.h> /* * Add a locked inode to the transaction. * * The inode must be locked, and it cannot be associated with any transaction. * If lock_flags is non-zero the inode will be unlocked on transaction commit. */ void xfs_trans_ijoin( struct xfs_trans *tp, struct xfs_inode *ip, uint lock_flags) { struct xfs_inode_log_item *iip; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); if (ip->i_itemp == NULL) xfs_inode_item_init(ip, ip->i_mount); iip = ip->i_itemp; ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* Reset the per-tx dirty context and add the item to the tx. */ iip->ili_dirty_flags = 0; xfs_trans_add_item(tp, &iip->ili_item); } /* * Transactional inode timestamp update. Requires the inode to be locked and * joined to the transaction supplied. Relies on the transaction subsystem to * track dirty state and update/writeback the inode accordingly. */ void xfs_trans_ichgtime( struct xfs_trans *tp, struct xfs_inode *ip, int flags) { struct inode *inode = VFS_I(ip); struct timespec64 tv; ASSERT(tp); xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); /* If the mtime changes, then ctime must also change */ ASSERT(flags & XFS_ICHGTIME_CHG); tv = inode_set_ctime_current(inode); if (flags & XFS_ICHGTIME_MOD) inode_set_mtime_to_ts(inode, tv); if (flags & XFS_ICHGTIME_ACCESS) inode_set_atime_to_ts(inode, tv); if (flags & XFS_ICHGTIME_CREATE) ip->i_crtime = tv; } /* * This is called to mark the fields indicated in fieldmask as needing to be * logged when the transaction is committed. The inode must already be * associated with the given transaction. All we do here is record where the * inode was dirtied and mark the transaction and inode log item dirty; * everything else is done in the ->precommit log item operation after the * changes in the transaction have been completed. */ void xfs_trans_log_inode( struct xfs_trans *tp, struct xfs_inode *ip, uint flags) { struct xfs_inode_log_item *iip = ip->i_itemp; struct inode *inode = VFS_I(ip); ASSERT(iip); xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); tp->t_flags |= XFS_TRANS_DIRTY; /* * First time we log the inode in a transaction, bump the inode change * counter if it is configured for this to occur. While we have the * inode locked exclusively for metadata modification, we can usually * avoid setting XFS_ILOG_CORE if no one has queried the value since * the last time it was incremented. If we have XFS_ILOG_CORE already * set however, then go ahead and bump the i_version counter * unconditionally. 
 */
	if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) {
		if (IS_I_VERSION(inode) &&
		    inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE))
			flags |= XFS_ILOG_IVERSION;
	}

	iip->ili_dirty_flags |= flags;
}

int
xfs_trans_roll_inode(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	int			error;

	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
	error = xfs_trans_roll(tpp);
	if (!error)
		xfs_trans_ijoin(*tpp, ip, 0);
	return error;
}
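The test_and_set_bit() gate above means only the first dirtying of the log item within a transaction can bump i_version; later relogging in the same transaction leaves the counter alone. A minimal userspace sketch of that pattern follows, with invented names and a plain flag instead of the kernel's atomic bit operation.

/*
 * Sketch only: models the "bump the change counter on first dirtying"
 * gate from xfs_trans_log_inode().  All names here are invented and a
 * plain bool replaces test_and_set_bit().
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_item {
	bool dirty;		/* stands in for the XFS_LI_DIRTY bit */
	unsigned long version;	/* stands in for the inode change counter */
};

static void demo_log_item(struct demo_item *item)
{
	bool was_dirty = item->dirty;

	item->dirty = true;
	if (!was_dirty)		/* only the first dirtying bumps the counter */
		item->version++;
}

int main(void)
{
	struct demo_item item = { .dirty = false, .version = 7 };

	demo_log_item(&item);	/* first log in the transaction: 7 -> 8 */
	demo_log_item(&item);	/* relogging in the same transaction: no bump */
	printf("version = %lu\n", item.version);
	return 0;
}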
| 415 414 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 | // SPDX-License-Identifier: GPL-2.0 /* * This file contains functions which manage high resolution tick * related events. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner */ #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> #include "tick-internal.h" /** * tick_program_event - program the CPU local timer device for the next event */ int tick_program_event(ktime_t expires, int force) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); if (unlikely(expires == KTIME_MAX)) { /* * We don't need the clock event device any more, stop it. */ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED); dev->next_event = KTIME_MAX; return 0; } if (unlikely(clockevent_state_oneshot_stopped(dev))) { /* * We need the clock event again, configure it in ONESHOT mode * before using it. */ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); } return clockevents_program_event(dev, expires, force); } /** * tick_resume_oneshot - resume oneshot mode */ void tick_resume_oneshot(void) { struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(dev, ktime_get(), true); } /** * tick_setup_oneshot - setup the event device for oneshot mode (hres or nohz) */ void tick_setup_oneshot(struct clock_event_device *newdev, void (*handler)(struct clock_event_device *), ktime_t next_event) { newdev->event_handler = handler; clockevents_switch_state(newdev, CLOCK_EVT_STATE_ONESHOT); clockevents_program_event(newdev, next_event, true); } /** * tick_switch_to_oneshot - switch to oneshot mode */ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) { struct tick_device *td = this_cpu_ptr(&tick_cpu_device); struct clock_event_device *dev = td->evtdev; if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || !tick_device_is_functional(dev)) { pr_info("Clockevents: could not switch to one-shot mode:"); if (!dev) { pr_cont(" no tick device\n"); } else { if (!tick_device_is_functional(dev)) pr_cont(" %s is not functional.\n", dev->name); else pr_cont(" %s does not support one-shot mode.\n", dev->name); } return -EINVAL; } td->mode = TICKDEV_MODE_ONESHOT; dev->event_handler = handler; clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); tick_broadcast_switch_to_oneshot(); return 0; } /** * tick_oneshot_mode_active - check whether the system is in oneshot mode * * returns 1 when either nohz or highres are enabled. otherwise 0. */ int tick_oneshot_mode_active(void) { unsigned long flags; int ret; local_irq_save(flags); ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT; local_irq_restore(flags); return ret; } #ifdef CONFIG_HIGH_RES_TIMERS /** * tick_init_highres - switch to high resolution mode * * Called with interrupts disabled. 
 */
int tick_init_highres(void)
{
	return tick_switch_to_oneshot(hrtimer_interrupt);
}
#endif
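The KTIME_MAX convention in tick_program_event() above is what lets the nohz/idle code park the per-CPU clock event device when nothing is queued. An illustrative sketch of a caller, assuming it runs with interrupts disabled as the real hrtimer/nohz callers do; the wrapper function is hypothetical, not part of the file above:

static void example_rearm_or_stop(ktime_t next_expiry)
{
	if (next_expiry == KTIME_MAX) {
		/* No timer queued: stop the local clock event device. */
		tick_program_event(KTIME_MAX, 1);
		return;
	}

	/* Something is queued: re-arm for the next expiry, forcing if needed. */
	tick_program_event(next_expiry, 1);
}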
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
#endif

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	int			bind_ifindex;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
					     struct sk_buff *skb);
typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
					    struct sk_buff *skb, int err,
					    __be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
						    struct list_head *head,
						    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void
*sk_user_data; /* user data used by encap_rcv call back */ /* Used for setting up udp_sock fields, see udp.h for details */ __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv; udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_err_rcv_t encap_err_rcv; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; }; /* Setup the given (UDP) sock to receive UDP encapsulated packets */ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, struct udp_tunnel_sock_cfg *sock_cfg); /* -- List of parsable UDP tunnel types -- * * Adding to this list will result in serious debate. The main issue is * that this list is essentially a list of workarounds for either poorly * designed tunnels, or poorly designed device offloads. * * The parsing supported via these types should really be used for Rx * traffic only as the network stack will have already inserted offsets for * the location of the headers in the skb. In addition any ports that are * pushed should be kept within the namespace without leaking to other * devices such as VFs or other ports on the same device. * * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the * need to use this for Rx checksum offload. It should not be necessary to * call this function to perform Tx offloads on outgoing traffic. */ enum udp_parsable_tunnel_type { UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */ }; struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; u8 hw_priv; }; /* Notify network devices of offloadable types */ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type); void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type); /* Transmit the skb using UDP encapsulation. */ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck, u16 ipcb_flags); void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck, u16 ip6cb_flags); void udp_tunnel_sock_release(struct socket *sock); struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, int oif, __be32 *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 tos, struct dst_cache *dst_cache); struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, struct socket *sock, int oif, struct in6_addr *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 dsfield, struct dst_cache *dst_cache); struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, const unsigned long *flags, __be64 tunnel_id, int md_size); #ifdef CONFIG_INET static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) { int type = udp_csum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; return iptunnel_handle_offloads(skb, type); } #endif #if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add); void udp_tunnel_update_gro_rcv(struct sock *sk, bool add); #else static inline void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add) {} static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {} #endif static inline void udp_tunnel_cleanup_gro(struct sock *sk) { udp_tunnel_update_gro_rcv(sk, false); udp_tunnel_update_gro_lookup(sock_net(sk), sk, false); } static inline void udp_tunnel_encap_enable(struct sock *sk) { if (udp_test_and_set_bit(ENCAP_ENABLED, sk)) return; #if IS_ENABLED(CONFIG_IPV6) if (READ_ONCE(sk->sk_family) == PF_INET6) ipv6_stub->udpv6_encap_enable(); #endif udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4 enum udp_tunnel_nic_info_flags { /* Device only supports offloads when it's open, all ports * will be removed before close and re-added after open. */ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(0), /* Device supports only IPv4 tunnels */ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(1), /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN. * This port must not be counted towards n_entries of any table. * Driver will not receive any callback associated with port 4789. */ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(2), }; struct udp_tunnel_nic; #define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2) struct udp_tunnel_nic_shared { struct udp_tunnel_nic *udp_tunnel_nic_info; struct list_head devices; }; struct udp_tunnel_nic_shared_node { struct net_device *dev; struct list_head list; }; /** * struct udp_tunnel_nic_info - driver UDP tunnel offload information * @set_port: callback for adding a new port * @unset_port: callback for removing a port * @sync_table: callback for syncing the entire port table at once * @shared: reference to device global state (optional) * @flags: device flags from enum udp_tunnel_nic_info_flags * @tables: UDP port tables this device has * @tables.n_entries: number of entries in this table * @tables.tunnel_types: types of tunnels this table accepts * * Drivers are expected to provide either @set_port and @unset_port callbacks * or the @sync_table callback. Callbacks are invoked with rtnl lock held. * * Devices which (misguidedly) share the UDP tunnel port table across multiple * netdevs should allocate an instance of struct udp_tunnel_nic_shared and * point @shared at it. * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices * sharing a table. * * Known limitations: * - UDP tunnel port notifications are fundamentally best-effort - * it is likely the driver will both see skbs which use a UDP tunnel port, * while not being a tunneled skb, and tunnel skbs from other ports - * drivers should only use these ports for non-critical RX-side offloads, * e.g. the checksum offload; * - none of the devices care about the socket family at present, so we don't * track it. Please extend this code if you care. 
*/ struct udp_tunnel_nic_info { /* one-by-one */ int (*set_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); int (*unset_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); /* all at once */ int (*sync_table)(struct net_device *dev, unsigned int table); struct udp_tunnel_nic_shared *shared; unsigned int flags; struct udp_tunnel_nic_table_info { unsigned int n_entries; unsigned int tunnel_types; } tables[UDP_TUNNEL_NIC_MAX_TABLES]; }; /* UDP tunnel module dependencies * * Tunnel drivers are expected to have a hard dependency on the udp_tunnel * module. NIC drivers are not, they just attach their * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come. * Loading a tunnel driver will cause the udp_tunnel module to be loaded * and only then will all the required state structures be allocated. * Since we want a weak dependency from the drivers and the core to udp_tunnel * we call things through the following stubs. */ struct udp_tunnel_nic_ops { void (*get_port)(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); void (*set_port_priv)(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv); void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*reset_ntf)(struct net_device *dev); size_t (*dump_size)(struct net_device *dev, unsigned int table); int (*dump_write)(struct net_device *dev, unsigned int table, struct sk_buff *skb); void (*assert_locked)(struct net_device *dev); void (*lock)(struct net_device *dev); void (*unlock)(struct net_device *dev); }; #ifdef CONFIG_INET extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; #else #define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL) #endif static inline void udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { /* This helper is used from .sync_table, we indicate empty entries * by zero'ed @ti. Drivers which need to know the details of a port * when it gets deleted should use the .set_port / .unset_port * callbacks. * Zero out here, otherwise !CONFIG_INET causes uninitilized warnings. 
*/ memset(ti, 0, sizeof(*ti)); if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->get_port(dev, table, idx, ti); } static inline void udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv) { if (udp_tunnel_nic_ops) { udp_tunnel_nic_ops->assert_locked(dev); udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv); } } static inline void udp_tunnel_nic_assert_locked(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->assert_locked(dev); } static inline void udp_tunnel_nic_lock(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->lock(dev); } static inline void udp_tunnel_nic_unlock(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->unlock(dev); } static inline void udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->add_port(dev, ti); } static inline void udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->del_port(dev, ti); } /** * udp_tunnel_nic_reset_ntf() - device-originating reset notification * @dev: network interface device structure * * Called by the driver to inform the core that the entire UDP tunnel port * state has been lost, usually due to device reset. Core will assume device * forgot all the ports and issue .set_port and .sync_table callbacks as * necessary. * * This function must be called with rtnl lock held, and will issue all * the callbacks before returning. */ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->reset_ntf(dev); } static inline size_t udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) { size_t ret; if (!udp_tunnel_nic_ops) return 0; udp_tunnel_nic_ops->lock(dev); ret = udp_tunnel_nic_ops->dump_size(dev, table); udp_tunnel_nic_ops->unlock(dev); return ret; } static inline int udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, struct sk_buff *skb) { int ret; if (!udp_tunnel_nic_ops) return 0; udp_tunnel_nic_ops->lock(dev); ret = udp_tunnel_nic_ops->dump_write(dev, table, skb); udp_tunnel_nic_ops->unlock(dev); return ret; } static inline void udp_tunnel_get_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; udp_tunnel_nic_assert_locked(dev); call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev); } static inline void udp_tunnel_drop_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; udp_tunnel_nic_assert_locked(dev); call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev); } #endif |
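The header splits the API in two: tunnel drivers create and configure their own kernel UDP socket, while NIC drivers only publish a struct udp_tunnel_nic_info and let the core drive them through the udp_tunnel_nic_ops stubs. A minimal sketch of the tunnel-driver half, assuming an IPv4 socket on the IANA VXLAN port (4789) mentioned above and a hypothetical receive callback; this is illustrative, not a function from the header:

static int example_tunnel_rcv(struct sock *sk, struct sk_buff *skb);

static int example_tunnel_open(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family		= AF_INET,
		.local_udp_port	= htons(4789),	/* illustrative port choice */
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type	= 1,		/* UDP_ENCAP_* value used by the tunnel */
		.encap_rcv	= example_tunnel_rcv,
	};
	int err;

	/* Create the kernel UDP socket bound to the tunnel port. */
	err = udp_sock_create(net, &port_cfg, sockp);
	if (err)
		return err;

	/* Mark it as an encapsulation socket and install the callbacks. */
	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}

A NIC driver, by contrast, fills only struct udp_tunnel_nic_info (either the set_port/unset_port pair or sync_table) and receives the port add/remove callbacks described in the kernel-doc above.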
| 3 2 2 2 3 3 1 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Virtual NCI device simulation driver * * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <bongsu.jeon@samsung.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/nfc/nci_core.h> #define IOCTL_GET_NCIDEV_IDX 0 #define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_ISO15693_MASK) struct virtual_nci_dev { struct nci_dev *ndev; struct mutex mtx; struct sk_buff *send_buff; struct wait_queue_head wq; bool running; }; static int virtual_nci_open(struct nci_dev *ndev) { struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); vdev->running = true; return 0; } static int virtual_nci_close(struct nci_dev *ndev) { struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); mutex_lock(&vdev->mtx); kfree_skb(vdev->send_buff); vdev->send_buff = NULL; vdev->running = false; mutex_unlock(&vdev->mtx); return 0; } static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); mutex_lock(&vdev->mtx); if (vdev->send_buff || !vdev->running) { mutex_unlock(&vdev->mtx); kfree_skb(skb); return -1; } vdev->send_buff = skb_copy(skb, GFP_KERNEL); if (!vdev->send_buff) { mutex_unlock(&vdev->mtx); kfree_skb(skb); return -1; } mutex_unlock(&vdev->mtx); wake_up_interruptible(&vdev->wq); consume_skb(skb); return 0; } static const struct nci_ops virtual_nci_ops = { .open = virtual_nci_open, .close = virtual_nci_close, .send = virtual_nci_send }; static ssize_t virtual_ncidev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct virtual_nci_dev *vdev = file->private_data; size_t actual_len; mutex_lock(&vdev->mtx); while (!vdev->send_buff) { mutex_unlock(&vdev->mtx); if (wait_event_interruptible(vdev->wq, vdev->send_buff)) return -EFAULT; mutex_lock(&vdev->mtx); } actual_len = min_t(size_t, count, vdev->send_buff->len); if (copy_to_user(buf, vdev->send_buff->data, actual_len)) { mutex_unlock(&vdev->mtx); return -EFAULT; } skb_pull(vdev->send_buff, actual_len); if (vdev->send_buff->len == 0) { consume_skb(vdev->send_buff); vdev->send_buff = NULL; } mutex_unlock(&vdev->mtx); return actual_len; } static ssize_t virtual_ncidev_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct virtual_nci_dev *vdev = file->private_data; struct sk_buff *skb; skb = alloc_skb(count, GFP_KERNEL); if (!skb) return -ENOMEM; if (copy_from_user(skb_put(skb, count), buf, count)) { kfree_skb(skb); return -EFAULT; } if (strnlen(skb->data, count) != count) { kfree_skb(skb); return -EINVAL; } 
nci_recv_frame(vdev->ndev, skb); return count; } static int virtual_ncidev_open(struct inode *inode, struct file *file) { int ret = 0; struct virtual_nci_dev *vdev; vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) return -ENOMEM; vdev->ndev = nci_allocate_device(&virtual_nci_ops, VIRTUAL_NFC_PROTOCOLS, 0, 0); if (!vdev->ndev) { kfree(vdev); return -ENOMEM; } mutex_init(&vdev->mtx); init_waitqueue_head(&vdev->wq); file->private_data = vdev; nci_set_drvdata(vdev->ndev, vdev); ret = nci_register_device(vdev->ndev); if (ret < 0) { nci_free_device(vdev->ndev); mutex_destroy(&vdev->mtx); kfree(vdev); return ret; } return 0; } static int virtual_ncidev_close(struct inode *inode, struct file *file) { struct virtual_nci_dev *vdev = file->private_data; nci_unregister_device(vdev->ndev); nci_free_device(vdev->ndev); mutex_destroy(&vdev->mtx); kfree(vdev); return 0; } static long virtual_ncidev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct virtual_nci_dev *vdev = file->private_data; const struct nfc_dev *nfc_dev = vdev->ndev->nfc_dev; void __user *p = (void __user *)arg; if (cmd != IOCTL_GET_NCIDEV_IDX) return -ENOTTY; if (copy_to_user(p, &nfc_dev->idx, sizeof(nfc_dev->idx))) return -EFAULT; return 0; } static const struct file_operations virtual_ncidev_fops = { .owner = THIS_MODULE, .read = virtual_ncidev_read, .write = virtual_ncidev_write, .open = virtual_ncidev_open, .release = virtual_ncidev_close, .unlocked_ioctl = virtual_ncidev_ioctl }; static struct miscdevice miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "virtual_nci", .fops = &virtual_ncidev_fops, .mode = 0600, }; module_misc_device(miscdev); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Virtual NCI device simulation driver"); MODULE_AUTHOR("Bongsu Jeon <bongsu.jeon@samsung.com>"); |
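Because the simulator is exposed as a misc character device, an NFC emulator drives it entirely from user space: read() returns the NCI frames the kernel core transmits, write() injects the simulated controller's responses, and the single ioctl reports which NCI device index was allocated. A minimal user-space sketch, assuming the node appears as /dev/virtual_nci (from the miscdevice name above) and using the raw IOCTL_GET_NCIDEV_IDX value 0 defined in the driver:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define IOCTL_GET_NCIDEV_IDX	0

int main(void)
{
	unsigned char frame[260];	/* 3-byte NCI header + up to 255-byte payload */
	unsigned int idx;
	ssize_t len;
	int fd;

	fd = open("/dev/virtual_nci", O_RDWR);
	if (fd < 0)
		return 1;

	/* Which NCI device index did the kernel assign to this instance? */
	if (ioctl(fd, IOCTL_GET_NCIDEV_IDX, &idx) == 0)
		printf("virtual NCI device index: %u\n", idx);

	/* Block until the NCI core sends a command frame, then dump it. */
	len = read(fd, frame, sizeof(frame));
	if (len > 0)
		printf("got %zd byte NCI frame, header bytes %02x %02x\n",
		       len, frame[0], frame[1]);

	/* A real emulator would write() a crafted response frame here. */
	close(fd);
	return 0;
}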
/*
 * linux/drivers/video/console/softcursor.c
 *
 * Generic software cursor for frame buffer devices
 *
 * Created 14 Nov 2002 by James Simmons
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/slab.h>

#include <asm/io.h>

#include "fbcon.h"

int soft_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	struct fbcon_ops *ops = info->fbcon_par;
	unsigned int scan_align = info->pixmap.scan_align - 1;
	unsigned int buf_align = info->pixmap.buf_align - 1;
	unsigned int i, size, dsize, s_pitch, d_pitch;
	struct fb_image *image;
	u8 *src, *dst;

	if (info->state != FBINFO_STATE_RUNNING)
		return 0;

	s_pitch = (cursor->image.width + 7) >> 3;
	dsize = s_pitch * cursor->image.height;

	if (dsize + sizeof(struct fb_image) != ops->cursor_size) {
		kfree(ops->cursor_src);
		ops->cursor_size = dsize + sizeof(struct fb_image);

		ops->cursor_src = kmalloc(ops->cursor_size, GFP_ATOMIC);
		if (!ops->cursor_src) {
			ops->cursor_size = 0;
			return -ENOMEM;
		}
	}

	src = ops->cursor_src + sizeof(struct fb_image);
	image = (struct fb_image *)ops->cursor_src;
	*image = cursor->image;
	d_pitch = (s_pitch + scan_align) & ~scan_align;

	size = d_pitch * image->height + buf_align;
	size &= ~buf_align;
	dst = fb_get_buffer_offset(info, &info->pixmap, size);

	if (cursor->enable) {
		switch (cursor->rop) {
		case ROP_XOR:
			for (i = 0; i < dsize; i++)
				src[i] = image->data[i] ^ cursor->mask[i];
			break;
		case ROP_COPY:
		default:
			for (i = 0; i < dsize; i++)
				src[i] = image->data[i] & cursor->mask[i];
			break;
		}
	} else
		memcpy(src, image->data, dsize);

	fb_pad_aligned_buffer(dst, d_pitch, src, s_pitch, image->height);
	image->data = dst;
	info->fbops->fb_imageblit(info, image);
	return 0;
}
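The only non-obvious arithmetic above is the pitch rounding: the source pitch is the cursor width in bytes, and the destination pitch is that value rounded up to the pixmap's scan alignment (assumed to be a power of two, which is what the mask trick relies on). A small stand-alone sketch with illustrative numbers, mirroring the d_pitch computation in soft_cursor():

#include <stdio.h>

/* Round a source pitch up to the pixmap scan alignment, as soft_cursor() does. */
static unsigned int pad_pitch(unsigned int s_pitch, unsigned int scan_align_bytes)
{
	unsigned int mask = scan_align_bytes - 1;	/* power-of-two alignment */

	return (s_pitch + mask) & ~mask;
}

int main(void)
{
	/* A 12-pixel-wide cursor needs (12 + 7) >> 3 = 2 source bytes per row. */
	printf("%u\n", pad_pitch(2, 4));	/* prints 4: each row is padded to 4 bytes */
	return 0;
}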
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
*/ #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/bug.h> #include <linux/list.h> #include <crypto/hash.h> #include "messages.h" #include "ctree.h" #include "discard.h" #include "disk-io.h" #include "send.h" #include "transaction.h" #include "sysfs.h" #include "volumes.h" #include "space-info.h" #include "block-group.h" #include "qgroup.h" #include "misc.h" #include "fs.h" #include "accessors.h" /* * Structure name Path * -------------------------------------------------------------------------- * btrfs_supported_static_feature_attrs /sys/fs/btrfs/features * btrfs_supported_feature_attrs /sys/fs/btrfs/features and * /sys/fs/btrfs/<uuid>/features * btrfs_attrs /sys/fs/btrfs/<uuid> * devid_attrs /sys/fs/btrfs/<uuid>/devinfo/<devid> * allocation_attrs /sys/fs/btrfs/<uuid>/allocation * qgroup_attrs /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid> * space_info_attrs /sys/fs/btrfs/<uuid>/allocation/<bg-type> * raid_attrs /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile> * discard_attrs /sys/fs/btrfs/<uuid>/discard * * When built with BTRFS_CONFIG_DEBUG: * * btrfs_debug_feature_attrs /sys/fs/btrfs/debug * btrfs_debug_mount_attrs /sys/fs/btrfs/<uuid>/debug */ struct btrfs_feature_attr { struct kobj_attribute kobj_attr; enum btrfs_feature_set feature_set; u64 feature_bit; }; /* For raid type sysfs entries */ struct raid_kobject { u64 flags; struct kobject kobj; }; #define __INIT_KOBJ_ATTR(_name, _mode, _show, _store) \ { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ } #define BTRFS_ATTR_W(_prefix, _name, _store) \ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ __INIT_KOBJ_ATTR(_name, 0200, NULL, _store) #define BTRFS_ATTR_RW(_prefix, _name, _show, _store) \ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ __INIT_KOBJ_ATTR(_name, 0644, _show, _store) #define BTRFS_ATTR(_prefix, _name, _show) \ static struct kobj_attribute btrfs_attr_##_prefix##_##_name = \ __INIT_KOBJ_ATTR(_name, 0444, _show, NULL) #define BTRFS_ATTR_PTR(_prefix, _name) \ (&btrfs_attr_##_prefix##_##_name.attr) #define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit) \ static struct btrfs_feature_attr btrfs_attr_features_##_name = { \ .kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO, \ btrfs_feature_attr_show, \ btrfs_feature_attr_store), \ .feature_set = _feature_set, \ .feature_bit = _feature_prefix ##_## _feature_bit, \ } #define BTRFS_FEAT_ATTR_PTR(_name) \ (&btrfs_attr_features_##_name.kobj_attr.attr) #define BTRFS_FEAT_ATTR_COMPAT(name, feature) \ BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature) #define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \ BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature) #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj); static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj); static struct kobject *get_btrfs_kobj(struct kobject *kobj); static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a) { return container_of(a, struct btrfs_feature_attr, kobj_attr); } static struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr) { return container_of(attr, struct kobj_attribute, attr); } static struct btrfs_feature_attr *attr_to_btrfs_feature_attr( struct attribute *attr) { return 
to_btrfs_feature_attr(attr_to_btrfs_attr(attr)); } static u64 get_features(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set) { struct btrfs_super_block *disk_super = fs_info->super_copy; if (set == FEAT_COMPAT) return btrfs_super_compat_flags(disk_super); else if (set == FEAT_COMPAT_RO) return btrfs_super_compat_ro_flags(disk_super); else return btrfs_super_incompat_flags(disk_super); } static void set_features(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set, u64 features) { struct btrfs_super_block *disk_super = fs_info->super_copy; if (set == FEAT_COMPAT) btrfs_set_super_compat_flags(disk_super, features); else if (set == FEAT_COMPAT_RO) btrfs_set_super_compat_ro_flags(disk_super, features); else btrfs_set_super_incompat_flags(disk_super, features); } static int can_modify_feature(struct btrfs_feature_attr *fa) { int val = 0; u64 set, clear; switch (fa->feature_set) { case FEAT_COMPAT: set = BTRFS_FEATURE_COMPAT_SAFE_SET; clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR; break; case FEAT_COMPAT_RO: set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET; clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR; break; case FEAT_INCOMPAT: set = BTRFS_FEATURE_INCOMPAT_SAFE_SET; clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR; break; default: btrfs_warn(NULL, "sysfs: unknown feature set %d", fa->feature_set); return 0; } if (set & fa->feature_bit) val |= 1; if (clear & fa->feature_bit) val |= 2; return val; } static ssize_t btrfs_feature_attr_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { int val = 0; struct btrfs_fs_info *fs_info = to_fs_info(kobj); struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a); if (fs_info) { u64 features = get_features(fs_info, fa->feature_set); if (features & fa->feature_bit) val = 1; } else val = can_modify_feature(fa); return sysfs_emit(buf, "%d\n", val); } static ssize_t btrfs_feature_attr_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t count) { struct btrfs_fs_info *fs_info; struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a); u64 features, set, clear; unsigned long val; int ret; fs_info = to_fs_info(kobj); if (!fs_info) return -EPERM; if (sb_rdonly(fs_info->sb)) return -EROFS; ret = kstrtoul(skip_spaces(buf), 0, &val); if (ret) return ret; if (fa->feature_set == FEAT_COMPAT) { set = BTRFS_FEATURE_COMPAT_SAFE_SET; clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR; } else if (fa->feature_set == FEAT_COMPAT_RO) { set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET; clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR; } else { set = BTRFS_FEATURE_INCOMPAT_SAFE_SET; clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR; } features = get_features(fs_info, fa->feature_set); /* Nothing to do */ if ((val && (features & fa->feature_bit)) || (!val && !(features & fa->feature_bit))) return count; if ((val && !(set & fa->feature_bit)) || (!val && !(clear & fa->feature_bit))) { btrfs_info(fs_info, "%sabling feature %s on mounted fs is not supported.", val ? "En" : "Dis", fa->kobj_attr.attr.name); return -EPERM; } btrfs_info(fs_info, "%s %s feature flag", val ? 
"Setting" : "Clearing", fa->kobj_attr.attr.name); spin_lock(&fs_info->super_lock); features = get_features(fs_info, fa->feature_set); if (val) features |= fa->feature_bit; else features &= ~fa->feature_bit; set_features(fs_info, fa->feature_set, features); spin_unlock(&fs_info->super_lock); /* * We don't want to do full transaction commit from inside sysfs */ set_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); wake_up_process(fs_info->transaction_kthread); return count; } static umode_t btrfs_feature_visible(struct kobject *kobj, struct attribute *attr, int unused) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); umode_t mode = attr->mode; if (fs_info) { struct btrfs_feature_attr *fa; u64 features; fa = attr_to_btrfs_feature_attr(attr); features = get_features(fs_info, fa->feature_set); if (can_modify_feature(fa)) mode |= S_IWUSR; else if (!(features & fa->feature_bit)) mode = 0; } return mode; } BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL); BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS); BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO); BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD); BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF); BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56); BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA); BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID); BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE); BTRFS_FEAT_ATTR_COMPAT_RO(block_group_tree, BLOCK_GROUP_TREE); BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34); BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA); #ifdef CONFIG_BLK_DEV_ZONED BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED); #endif #ifdef CONFIG_BTRFS_EXPERIMENTAL /* Remove once support for extent tree v2 is feature complete */ BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2); /* Remove once support for raid stripe tree is feature complete. */ BTRFS_FEAT_ATTR_INCOMPAT(raid_stripe_tree, RAID_STRIPE_TREE); #endif #ifdef CONFIG_FS_VERITY BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY); #endif /* * Features which depend on feature bits and may differ between each fs. * * /sys/fs/btrfs/features - all available features implemented by this version * /sys/fs/btrfs/UUID/features - features of the fs which are enabled or * can be changed on a mounted filesystem. 
*/ static struct attribute *btrfs_supported_feature_attrs[] = { BTRFS_FEAT_ATTR_PTR(default_subvol), BTRFS_FEAT_ATTR_PTR(mixed_groups), BTRFS_FEAT_ATTR_PTR(compress_lzo), BTRFS_FEAT_ATTR_PTR(compress_zstd), BTRFS_FEAT_ATTR_PTR(extended_iref), BTRFS_FEAT_ATTR_PTR(raid56), BTRFS_FEAT_ATTR_PTR(skinny_metadata), BTRFS_FEAT_ATTR_PTR(no_holes), BTRFS_FEAT_ATTR_PTR(metadata_uuid), BTRFS_FEAT_ATTR_PTR(free_space_tree), BTRFS_FEAT_ATTR_PTR(raid1c34), BTRFS_FEAT_ATTR_PTR(block_group_tree), BTRFS_FEAT_ATTR_PTR(simple_quota), #ifdef CONFIG_BLK_DEV_ZONED BTRFS_FEAT_ATTR_PTR(zoned), #endif #ifdef CONFIG_BTRFS_EXPERIMENTAL BTRFS_FEAT_ATTR_PTR(extent_tree_v2), BTRFS_FEAT_ATTR_PTR(raid_stripe_tree), #endif #ifdef CONFIG_FS_VERITY BTRFS_FEAT_ATTR_PTR(verity), #endif NULL }; static const struct attribute_group btrfs_feature_attr_group = { .name = "features", .is_visible = btrfs_feature_visible, .attrs = btrfs_supported_feature_attrs, }; static ssize_t rmdir_subvol_show(struct kobject *kobj, struct kobj_attribute *ka, char *buf) { return sysfs_emit(buf, "0\n"); } BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show); static ssize_t supported_checksums_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { ssize_t ret = 0; int i; for (i = 0; i < btrfs_get_num_csums(); i++) { /* * This "trick" only works as long as 'enum btrfs_csum_type' has * no holes in it */ ret += sysfs_emit_at(buf, ret, "%s%s", (i == 0 ? "" : " "), btrfs_super_csum_name(i)); } ret += sysfs_emit_at(buf, ret, "\n"); return ret; } BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show); static ssize_t send_stream_version_show(struct kobject *kobj, struct kobj_attribute *ka, char *buf) { return sysfs_emit(buf, "%d\n", BTRFS_SEND_STREAM_VERSION); } BTRFS_ATTR(static_feature, send_stream_version, send_stream_version_show); static const char *rescue_opts[] = { "usebackuproot", "nologreplay", "ignorebadroots", "ignoredatacsums", "ignoremetacsums", "ignoresuperflags", "all", }; static ssize_t supported_rescue_options_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { ssize_t ret = 0; int i; for (i = 0; i < ARRAY_SIZE(rescue_opts); i++) ret += sysfs_emit_at(buf, ret, "%s%s", (i ? " " : ""), rescue_opts[i]); ret += sysfs_emit_at(buf, ret, "\n"); return ret; } BTRFS_ATTR(static_feature, supported_rescue_options, supported_rescue_options_show); static ssize_t supported_sectorsizes_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { ssize_t ret = 0; if (BTRFS_MIN_BLOCKSIZE != SZ_4K && BTRFS_MIN_BLOCKSIZE != PAGE_SIZE) ret += sysfs_emit_at(buf, ret, "%u ", BTRFS_MIN_BLOCKSIZE); if (PAGE_SIZE > SZ_4K) ret += sysfs_emit_at(buf, ret, "%u ", SZ_4K); ret += sysfs_emit_at(buf, ret, "%lu\n", PAGE_SIZE); return ret; } BTRFS_ATTR(static_feature, supported_sectorsizes, supported_sectorsizes_show); static ssize_t acl_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { return sysfs_emit(buf, "%d\n", IS_ENABLED(CONFIG_BTRFS_FS_POSIX_ACL)); } BTRFS_ATTR(static_feature, acl, acl_show); static ssize_t temp_fsid_supported_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { return sysfs_emit(buf, "0\n"); } BTRFS_ATTR(static_feature, temp_fsid, temp_fsid_supported_show); /* * Features which only depend on kernel version. * * These are listed in /sys/fs/btrfs/features along with * btrfs_supported_feature_attrs. 
*/ static struct attribute *btrfs_supported_static_feature_attrs[] = { BTRFS_ATTR_PTR(static_feature, acl), BTRFS_ATTR_PTR(static_feature, rmdir_subvol), BTRFS_ATTR_PTR(static_feature, supported_checksums), BTRFS_ATTR_PTR(static_feature, send_stream_version), BTRFS_ATTR_PTR(static_feature, supported_rescue_options), BTRFS_ATTR_PTR(static_feature, supported_sectorsizes), BTRFS_ATTR_PTR(static_feature, temp_fsid), NULL }; static const struct attribute_group btrfs_static_feature_attr_group = { .name = "features", .attrs = btrfs_supported_static_feature_attrs, }; /* * Discard statistics and tunables */ #define discard_to_fs_info(_kobj) to_fs_info(get_btrfs_kobj(_kobj)) static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%lld\n", atomic64_read(&fs_info->discard_ctl.discardable_bytes)); } BTRFS_ATTR(discard, discardable_bytes, btrfs_discardable_bytes_show); static ssize_t btrfs_discardable_extents_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%d\n", atomic_read(&fs_info->discard_ctl.discardable_extents)); } BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show); static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%llu\n", fs_info->discard_ctl.discard_bitmap_bytes); } BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show); static ssize_t btrfs_discard_bytes_saved_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%lld\n", atomic64_read(&fs_info->discard_ctl.discard_bytes_saved)); } BTRFS_ATTR(discard, discard_bytes_saved, btrfs_discard_bytes_saved_show); static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%llu\n", fs_info->discard_ctl.discard_extent_bytes); } BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show); static ssize_t btrfs_discard_iops_limit_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%u\n", READ_ONCE(fs_info->discard_ctl.iops_limit)); } static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; u32 iops_limit; int ret; ret = kstrtou32(buf, 10, &iops_limit); if (ret) return -EINVAL; WRITE_ONCE(discard_ctl->iops_limit, iops_limit); btrfs_discard_calc_delay(discard_ctl); btrfs_discard_schedule_work(discard_ctl, true); return len; } BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show, btrfs_discard_iops_limit_store); static ssize_t btrfs_discard_kbps_limit_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%u\n", READ_ONCE(fs_info->discard_ctl.kbps_limit)); } static ssize_t btrfs_discard_kbps_limit_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); 
struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; u32 kbps_limit; int ret; ret = kstrtou32(buf, 10, &kbps_limit); if (ret) return -EINVAL; WRITE_ONCE(discard_ctl->kbps_limit, kbps_limit); btrfs_discard_schedule_work(discard_ctl, true); return len; } BTRFS_ATTR_RW(discard, kbps_limit, btrfs_discard_kbps_limit_show, btrfs_discard_kbps_limit_store); static ssize_t btrfs_discard_max_discard_size_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); return sysfs_emit(buf, "%llu\n", READ_ONCE(fs_info->discard_ctl.max_discard_size)); } static ssize_t btrfs_discard_max_discard_size_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj); struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl; u64 max_discard_size; int ret; ret = kstrtou64(buf, 10, &max_discard_size); if (ret) return -EINVAL; WRITE_ONCE(discard_ctl->max_discard_size, max_discard_size); return len; } BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show, btrfs_discard_max_discard_size_store); /* * Per-filesystem stats for discard (when mounted with discard=async). * * Path: /sys/fs/btrfs/<uuid>/discard/ */ static const struct attribute *discard_attrs[] = { BTRFS_ATTR_PTR(discard, discardable_bytes), BTRFS_ATTR_PTR(discard, discardable_extents), BTRFS_ATTR_PTR(discard, discard_bitmap_bytes), BTRFS_ATTR_PTR(discard, discard_bytes_saved), BTRFS_ATTR_PTR(discard, discard_extent_bytes), BTRFS_ATTR_PTR(discard, iops_limit), BTRFS_ATTR_PTR(discard, kbps_limit), BTRFS_ATTR_PTR(discard, max_discard_size), NULL, }; #ifdef CONFIG_BTRFS_DEBUG /* * Per-filesystem runtime debugging exported via sysfs. * * Path: /sys/fs/btrfs/UUID/debug/ */ static const struct attribute *btrfs_debug_mount_attrs[] = { NULL, }; /* * Runtime debugging exported via sysfs, applies to all mounted filesystems. 
* * Path: /sys/fs/btrfs/debug */ static struct attribute *btrfs_debug_feature_attrs[] = { NULL }; static const struct attribute_group btrfs_debug_feature_attr_group = { .name = "debug", .attrs = btrfs_debug_feature_attrs, }; #endif static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf) { u64 val; if (lock) spin_lock(lock); val = *value_ptr; if (lock) spin_unlock(lock); return sysfs_emit(buf, "%llu\n", val); } static ssize_t global_rsv_size_show(struct kobject *kobj, struct kobj_attribute *ka, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent); struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf); } BTRFS_ATTR(allocation, global_rsv_size, global_rsv_size_show); static ssize_t global_rsv_reserved_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent); struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf); } BTRFS_ATTR(allocation, global_rsv_reserved, global_rsv_reserved_show); #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj) #define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj) static ssize_t raid_bytes_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); BTRFS_ATTR(raid, total_bytes, raid_bytes_show); BTRFS_ATTR(raid, used_bytes, raid_bytes_show); static ssize_t raid_bytes_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct btrfs_space_info *sinfo = to_space_info(kobj->parent); struct btrfs_block_group *block_group; int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags); u64 val = 0; down_read(&sinfo->groups_sem); list_for_each_entry(block_group, &sinfo->block_groups[index], list) { if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes)) val += block_group->length; else val += block_group->used; } up_read(&sinfo->groups_sem); return sysfs_emit(buf, "%llu\n", val); } /* * Allocation information about block group profiles. * * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>/ */ static struct attribute *raid_attrs[] = { BTRFS_ATTR_PTR(raid, total_bytes), BTRFS_ATTR_PTR(raid, used_bytes), NULL }; ATTRIBUTE_GROUPS(raid); static void release_raid_kobj(struct kobject *kobj) { kfree(to_raid_kobj(kobj)); } static const struct kobj_type btrfs_raid_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = release_raid_kobj, .default_groups = raid_groups, }; #define SPACE_INFO_ATTR(field) \ static ssize_t btrfs_space_info_show_##field(struct kobject *kobj, \ struct kobj_attribute *a, \ char *buf) \ { \ struct btrfs_space_info *sinfo = to_space_info(kobj); \ return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf); \ } \ BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field) static ssize_t btrfs_chunk_size_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_space_info *sinfo = to_space_info(kobj); return sysfs_emit(buf, "%llu\n", READ_ONCE(sinfo->chunk_size)); } /* * Store new chunk size in space info. Can be called on a read-only filesystem. * * If the new chunk size value is larger than 10% of free space it is reduced * to match that limit. Alignment must be to 256M and the system chunk size * cannot be set. 
*/ static ssize_t btrfs_chunk_size_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_space_info *space_info = to_space_info(kobj); struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj)); char *retptr; u64 val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!fs_info->fs_devices) return -EINVAL; if (btrfs_is_zoned(fs_info)) return -EINVAL; /* System block type must not be changed. */ if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) return -EPERM; val = memparse(buf, &retptr); /* There could be trailing '\n', also catch any typos after the value */ retptr = skip_spaces(retptr); if (*retptr != 0 || val == 0) return -EINVAL; val = min(val, BTRFS_MAX_DATA_CHUNK_SIZE); /* Limit stripe size to 10% of available space. */ val = min(mult_perc(fs_info->fs_devices->total_rw_bytes, 10), val); /* Must be multiple of 256M. */ val &= ~((u64)SZ_256M - 1); /* Must be at least 256M. */ if (val < SZ_256M) return -EINVAL; btrfs_update_space_info_chunk_size(space_info, val); return len; } static ssize_t btrfs_size_classes_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_space_info *sinfo = to_space_info(kobj); struct btrfs_block_group *bg; u32 none = 0; u32 small = 0; u32 medium = 0; u32 large = 0; for (int i = 0; i < BTRFS_NR_RAID_TYPES; ++i) { down_read(&sinfo->groups_sem); list_for_each_entry(bg, &sinfo->block_groups[i], list) { if (!btrfs_block_group_should_use_size_class(bg)) continue; switch (bg->size_class) { case BTRFS_BG_SZ_NONE: none++; break; case BTRFS_BG_SZ_SMALL: small++; break; case BTRFS_BG_SZ_MEDIUM: medium++; break; case BTRFS_BG_SZ_LARGE: large++; break; } } up_read(&sinfo->groups_sem); } return sysfs_emit(buf, "none %u\n" "small %u\n" "medium %u\n" "large %u\n", none, small, medium, large); } #ifdef CONFIG_BTRFS_DEBUG /* * Request chunk allocation with current chunk size. */ static ssize_t btrfs_force_chunk_alloc_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_space_info *space_info = to_space_info(kobj); struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj)); struct btrfs_trans_handle *trans; bool val; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (sb_rdonly(fs_info->sb)) return -EROFS; ret = kstrtobool(buf, &val); if (ret) return ret; if (!val) return -EINVAL; /* * This is unsafe to be called from sysfs context and may cause * unexpected problems. 
*/ trans = btrfs_start_transaction(fs_info->tree_root, 0); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_force_chunk_alloc(trans, space_info->flags); btrfs_end_transaction(trans); if (ret == 1) return len; return -ENOSPC; } BTRFS_ATTR_W(space_info, force_chunk_alloc, btrfs_force_chunk_alloc_store); #endif SPACE_INFO_ATTR(flags); SPACE_INFO_ATTR(total_bytes); SPACE_INFO_ATTR(bytes_used); SPACE_INFO_ATTR(bytes_pinned); SPACE_INFO_ATTR(bytes_reserved); SPACE_INFO_ATTR(bytes_may_use); SPACE_INFO_ATTR(bytes_readonly); SPACE_INFO_ATTR(bytes_zone_unusable); SPACE_INFO_ATTR(disk_used); SPACE_INFO_ATTR(disk_total); SPACE_INFO_ATTR(reclaim_count); SPACE_INFO_ATTR(reclaim_bytes); SPACE_INFO_ATTR(reclaim_errors); BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store); BTRFS_ATTR(space_info, size_classes, btrfs_size_classes_show); static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_space_info *space_info = to_space_info(kobj); ssize_t ret; spin_lock(&space_info->lock); ret = sysfs_emit(buf, "%d\n", btrfs_calc_reclaim_threshold(space_info)); spin_unlock(&space_info->lock); return ret; } static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_space_info *space_info = to_space_info(kobj); int thresh; int ret; if (READ_ONCE(space_info->dynamic_reclaim)) return -EINVAL; ret = kstrtoint(buf, 10, &thresh); if (ret) return ret; if (thresh < 0 || thresh > 100) return -EINVAL; WRITE_ONCE(space_info->bg_reclaim_threshold, thresh); return len; } BTRFS_ATTR_RW(space_info, bg_reclaim_threshold, btrfs_sinfo_bg_reclaim_threshold_show, btrfs_sinfo_bg_reclaim_threshold_store); static ssize_t btrfs_sinfo_dynamic_reclaim_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_space_info *space_info = to_space_info(kobj); return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->dynamic_reclaim)); } static ssize_t btrfs_sinfo_dynamic_reclaim_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_space_info *space_info = to_space_info(kobj); int dynamic_reclaim; int ret; ret = kstrtoint(buf, 10, &dynamic_reclaim); if (ret) return ret; if (dynamic_reclaim < 0) return -EINVAL; WRITE_ONCE(space_info->dynamic_reclaim, dynamic_reclaim != 0); return len; } BTRFS_ATTR_RW(space_info, dynamic_reclaim, btrfs_sinfo_dynamic_reclaim_show, btrfs_sinfo_dynamic_reclaim_store); static ssize_t btrfs_sinfo_periodic_reclaim_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_space_info *space_info = to_space_info(kobj); return sysfs_emit(buf, "%d\n", READ_ONCE(space_info->periodic_reclaim)); } static ssize_t btrfs_sinfo_periodic_reclaim_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_space_info *space_info = to_space_info(kobj); int periodic_reclaim; int ret; ret = kstrtoint(buf, 10, &periodic_reclaim); if (ret) return ret; if (periodic_reclaim < 0) return -EINVAL; WRITE_ONCE(space_info->periodic_reclaim, periodic_reclaim != 0); return len; } BTRFS_ATTR_RW(space_info, periodic_reclaim, btrfs_sinfo_periodic_reclaim_show, btrfs_sinfo_periodic_reclaim_store); /* * Allocation information about block group types. 
* * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/ */ static struct attribute *space_info_attrs[] = { BTRFS_ATTR_PTR(space_info, flags), BTRFS_ATTR_PTR(space_info, total_bytes), BTRFS_ATTR_PTR(space_info, bytes_used), BTRFS_ATTR_PTR(space_info, bytes_pinned), BTRFS_ATTR_PTR(space_info, bytes_reserved), BTRFS_ATTR_PTR(space_info, bytes_may_use), BTRFS_ATTR_PTR(space_info, bytes_readonly), BTRFS_ATTR_PTR(space_info, bytes_zone_unusable), BTRFS_ATTR_PTR(space_info, disk_used), BTRFS_ATTR_PTR(space_info, disk_total), BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold), BTRFS_ATTR_PTR(space_info, dynamic_reclaim), BTRFS_ATTR_PTR(space_info, chunk_size), BTRFS_ATTR_PTR(space_info, size_classes), BTRFS_ATTR_PTR(space_info, reclaim_count), BTRFS_ATTR_PTR(space_info, reclaim_bytes), BTRFS_ATTR_PTR(space_info, reclaim_errors), BTRFS_ATTR_PTR(space_info, periodic_reclaim), #ifdef CONFIG_BTRFS_DEBUG BTRFS_ATTR_PTR(space_info, force_chunk_alloc), #endif NULL, }; ATTRIBUTE_GROUPS(space_info); static void space_info_release(struct kobject *kobj) { struct btrfs_space_info *sinfo = to_space_info(kobj); kfree(sinfo); } static const struct kobj_type space_info_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = space_info_release, .default_groups = space_info_groups, }; /* * Allocation information about block groups. * * Path: /sys/fs/btrfs/<uuid>/allocation/ */ static const struct attribute *allocation_attrs[] = { BTRFS_ATTR_PTR(allocation, global_rsv_reserved), BTRFS_ATTR_PTR(allocation, global_rsv_size), NULL, }; static ssize_t btrfs_label_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); char *label = fs_info->super_copy->label; ssize_t ret; spin_lock(&fs_info->super_lock); ret = sysfs_emit(buf, label[0] ? 
"%s\n" : "%s", label); spin_unlock(&fs_info->super_lock); return ret; } static ssize_t btrfs_label_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); size_t p_len; if (!fs_info) return -EPERM; if (sb_rdonly(fs_info->sb)) return -EROFS; /* * p_len is the len until the first occurrence of either * '\n' or '\0' */ p_len = strcspn(buf, "\n"); if (p_len >= BTRFS_LABEL_SIZE) return -EINVAL; spin_lock(&fs_info->super_lock); memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE); memcpy(fs_info->super_copy->label, buf, p_len); spin_unlock(&fs_info->super_lock); /* * We don't want to do full transaction commit from inside sysfs */ set_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); wake_up_process(fs_info->transaction_kthread); return len; } BTRFS_ATTR_RW(, label, btrfs_label_show, btrfs_label_store); static ssize_t btrfs_nodesize_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%u\n", fs_info->nodesize); } BTRFS_ATTR(, nodesize, btrfs_nodesize_show); static ssize_t btrfs_sectorsize_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show); static ssize_t btrfs_commit_stats_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); u64 now = ktime_get_ns(); u64 start_time = fs_info->commit_stats.critical_section_start_time; u64 pending = 0; if (start_time) pending = now - start_time; return sysfs_emit(buf, "commits %llu\n" "cur_commit_ms %llu\n" "last_commit_ms %llu\n" "max_commit_ms %llu\n" "total_commit_ms %llu\n", fs_info->commit_stats.commit_count, div_u64(pending, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC), div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC)); } static ssize_t btrfs_commit_stats_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); unsigned long val; int ret; if (!fs_info) return -EPERM; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; ret = kstrtoul(buf, 10, &val); if (ret) return ret; if (val) return -EINVAL; WRITE_ONCE(fs_info->commit_stats.max_commit_dur, 0); return len; } BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store); static ssize_t btrfs_clone_alignment_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show); static ssize_t quota_override_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); int quota_override; quota_override = test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags); return sysfs_emit(buf, "%d\n", quota_override); } static ssize_t quota_override_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); unsigned long knob; int ret; if (!fs_info) return -EPERM; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; ret = kstrtoul(buf, 10, &knob); if (ret) return ret; if (knob > 1) return -EINVAL; if (knob) set_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags); 
else clear_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags); return len; } BTRFS_ATTR_RW(, quota_override, quota_override_show, quota_override_store); static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%pU\n", fs_info->fs_devices->metadata_uuid); } BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show); static ssize_t btrfs_checksum_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); u16 csum_type = btrfs_super_csum_type(fs_info->super_copy); return sysfs_emit(buf, "%s (%s)\n", btrfs_super_csum_name(csum_type), crypto_shash_driver_name(fs_info->csum_shash)); } BTRFS_ATTR(, checksum, btrfs_checksum_show); static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); const char *str; switch (READ_ONCE(fs_info->exclusive_operation)) { case BTRFS_EXCLOP_NONE: str = "none\n"; break; case BTRFS_EXCLOP_BALANCE: str = "balance\n"; break; case BTRFS_EXCLOP_BALANCE_PAUSED: str = "balance paused\n"; break; case BTRFS_EXCLOP_DEV_ADD: str = "device add\n"; break; case BTRFS_EXCLOP_DEV_REMOVE: str = "device remove\n"; break; case BTRFS_EXCLOP_DEV_REPLACE: str = "device replace\n"; break; case BTRFS_EXCLOP_RESIZE: str = "resize\n"; break; case BTRFS_EXCLOP_SWAP_ACTIVATE: str = "swap activate\n"; break; default: str = "UNKNOWN\n"; break; } return sysfs_emit(buf, "%s", str); } BTRFS_ATTR(, exclusive_operation, btrfs_exclusive_operation_show); static ssize_t btrfs_generation_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%llu\n", btrfs_get_fs_generation(fs_info)); } BTRFS_ATTR(, generation, btrfs_generation_show); static ssize_t btrfs_temp_fsid_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%d\n", fs_info->fs_devices->temp_fsid); } BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show); static const char *btrfs_read_policy_name[] = { "pid", #ifdef CONFIG_BTRFS_EXPERIMENTAL "round-robin", "devid", #endif }; #ifdef CONFIG_BTRFS_EXPERIMENTAL /* Global module configuration parameters. */ static char *read_policy; char *btrfs_get_mod_read_policy(void) { return read_policy; } /* Set perms to 0, disable /sys/module/btrfs/parameter/read_policy interface. */ module_param(read_policy, charp, 0); MODULE_PARM_DESC(read_policy, "Global read policy: pid (default), round-robin[:<min_contig_read>], devid[:<devid>]"); #endif int btrfs_read_policy_to_enum(const char *str, s64 *value_ret) { char param[32]; char __maybe_unused *value_str; if (!str || strlen(str) == 0) return 0; strscpy(param, str); #ifdef CONFIG_BTRFS_EXPERIMENTAL /* Separate value from input in policy:value format. */ value_str = strchr(param, ':'); if (value_str) { char *retptr; *value_str = 0; value_str++; if (!value_ret) return -EINVAL; *value_ret = memparse(value_str, &retptr); /* There could be any trailing typos after the value. 
*/ retptr = skip_spaces(retptr); if (*retptr != 0 || *value_ret <= 0) return -EINVAL; } #endif return sysfs_match_string(btrfs_read_policy_name, param); } #ifdef CONFIG_BTRFS_EXPERIMENTAL int __init btrfs_read_policy_init(void) { s64 value; if (btrfs_read_policy_to_enum(read_policy, &value) == -EINVAL) { btrfs_err(NULL, "invalid read policy or value %s", read_policy); return -EINVAL; } return 0; } #endif static ssize_t btrfs_read_policy_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj); const enum btrfs_read_policy policy = READ_ONCE(fs_devices->read_policy); ssize_t ret = 0; int i; for (i = 0; i < BTRFS_NR_READ_POLICY; i++) { if (ret != 0) ret += sysfs_emit_at(buf, ret, " "); if (i == policy) ret += sysfs_emit_at(buf, ret, "["); ret += sysfs_emit_at(buf, ret, "%s", btrfs_read_policy_name[i]); #ifdef CONFIG_BTRFS_EXPERIMENTAL if (i == BTRFS_READ_POLICY_RR) ret += sysfs_emit_at(buf, ret, ":%u", READ_ONCE(fs_devices->rr_min_contig_read)); if (i == BTRFS_READ_POLICY_DEVID) ret += sysfs_emit_at(buf, ret, ":%llu", READ_ONCE(fs_devices->read_devid)); #endif if (i == policy) ret += sysfs_emit_at(buf, ret, "]"); } ret += sysfs_emit_at(buf, ret, "\n"); return ret; } static ssize_t btrfs_read_policy_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj); int index; s64 value = -1; index = btrfs_read_policy_to_enum(buf, &value); if (index < 0) return -EINVAL; #ifdef CONFIG_BTRFS_EXPERIMENTAL /* If moving from RR then disable collecting fs stats. */ if (fs_devices->read_policy == BTRFS_READ_POLICY_RR && index != BTRFS_READ_POLICY_RR) fs_devices->collect_fs_stats = false; if (index == BTRFS_READ_POLICY_RR) { if (value != -1) { const u32 sectorsize = fs_devices->fs_info->sectorsize; if (!IS_ALIGNED(value, sectorsize)) { u64 temp_value = round_up(value, sectorsize); btrfs_debug(fs_devices->fs_info, "read_policy: min contig read %lld should be multiple of sectorsize %u, rounded to %llu", value, sectorsize, temp_value); value = temp_value; } } else { value = BTRFS_DEFAULT_RR_MIN_CONTIG_READ; } if (index != READ_ONCE(fs_devices->read_policy) || value != READ_ONCE(fs_devices->rr_min_contig_read)) { WRITE_ONCE(fs_devices->read_policy, index); WRITE_ONCE(fs_devices->rr_min_contig_read, value); btrfs_info(fs_devices->fs_info, "read policy set to '%s:%lld'", btrfs_read_policy_name[index], value); } fs_devices->collect_fs_stats = true; return len; } if (index == BTRFS_READ_POLICY_DEVID) { if (value != -1) { BTRFS_DEV_LOOKUP_ARGS(args); /* Validate input devid. */ args.devid = value; if (btrfs_find_device(fs_devices, &args) == NULL) return -EINVAL; } else { /* Set default devid to the devid of the latest device. 
*/ value = fs_devices->latest_dev->devid; } if (index != READ_ONCE(fs_devices->read_policy) || value != READ_ONCE(fs_devices->read_devid)) { WRITE_ONCE(fs_devices->read_policy, index); WRITE_ONCE(fs_devices->read_devid, value); btrfs_info(fs_devices->fs_info, "read policy set to '%s:%llu'", btrfs_read_policy_name[index], value); } return len; } #endif if (index != READ_ONCE(fs_devices->read_policy)) { WRITE_ONCE(fs_devices->read_policy, index); btrfs_info(fs_devices->fs_info, "read policy set to '%s'", btrfs_read_policy_name[index]); } return len; } BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store); static ssize_t btrfs_bg_reclaim_threshold_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); return sysfs_emit(buf, "%d\n", READ_ONCE(fs_info->bg_reclaim_threshold)); } static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = to_fs_info(kobj); int thresh; int ret; ret = kstrtoint(buf, 10, &thresh); if (ret) return ret; #ifdef CONFIG_BTRFS_DEBUG if (thresh != 0 && (thresh > 100)) return -EINVAL; #else if (thresh != 0 && (thresh <= 50 || thresh > 100)) return -EINVAL; #endif WRITE_ONCE(fs_info->bg_reclaim_threshold, thresh); return len; } BTRFS_ATTR_RW(, bg_reclaim_threshold, btrfs_bg_reclaim_threshold_show, btrfs_bg_reclaim_threshold_store); #ifdef CONFIG_BTRFS_EXPERIMENTAL static ssize_t btrfs_offload_csum_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj); switch (READ_ONCE(fs_devices->offload_csum_mode)) { case BTRFS_OFFLOAD_CSUM_AUTO: return sysfs_emit(buf, "auto\n"); case BTRFS_OFFLOAD_CSUM_FORCE_ON: return sysfs_emit(buf, "1\n"); case BTRFS_OFFLOAD_CSUM_FORCE_OFF: return sysfs_emit(buf, "0\n"); default: WARN_ON(1); return -EINVAL; } } static ssize_t btrfs_offload_csum_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj); int ret; bool val; ret = kstrtobool(buf, &val); if (ret == 0) WRITE_ONCE(fs_devices->offload_csum_mode, val ? BTRFS_OFFLOAD_CSUM_FORCE_ON : BTRFS_OFFLOAD_CSUM_FORCE_OFF); else if (ret == -EINVAL && sysfs_streq(buf, "auto")) WRITE_ONCE(fs_devices->offload_csum_mode, BTRFS_OFFLOAD_CSUM_AUTO); else return -EINVAL; return len; } BTRFS_ATTR_RW(, offload_csum, btrfs_offload_csum_show, btrfs_offload_csum_store); #endif /* * Per-filesystem information and stats. 
* * Path: /sys/fs/btrfs/<uuid>/ */ static const struct attribute *btrfs_attrs[] = { BTRFS_ATTR_PTR(, label), BTRFS_ATTR_PTR(, nodesize), BTRFS_ATTR_PTR(, sectorsize), BTRFS_ATTR_PTR(, clone_alignment), BTRFS_ATTR_PTR(, quota_override), BTRFS_ATTR_PTR(, metadata_uuid), BTRFS_ATTR_PTR(, checksum), BTRFS_ATTR_PTR(, exclusive_operation), BTRFS_ATTR_PTR(, generation), BTRFS_ATTR_PTR(, read_policy), BTRFS_ATTR_PTR(, bg_reclaim_threshold), BTRFS_ATTR_PTR(, commit_stats), BTRFS_ATTR_PTR(, temp_fsid), #ifdef CONFIG_BTRFS_EXPERIMENTAL BTRFS_ATTR_PTR(, offload_csum), #endif NULL, }; static void btrfs_release_fsid_kobj(struct kobject *kobj) { struct btrfs_fs_devices *fs_devs = to_fs_devs(kobj); memset(&fs_devs->fsid_kobj, 0, sizeof(struct kobject)); complete(&fs_devs->kobj_unregister); } static const struct kobj_type btrfs_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = btrfs_release_fsid_kobj, }; static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj) { if (kobj->ktype != &btrfs_ktype) return NULL; return container_of(kobj, struct btrfs_fs_devices, fsid_kobj); } static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj) { if (kobj->ktype != &btrfs_ktype) return NULL; return to_fs_devs(kobj)->fs_info; } static struct kobject *get_btrfs_kobj(struct kobject *kobj) { while (kobj) { if (kobj->ktype == &btrfs_ktype) return kobj; kobj = kobj->parent; } return NULL; } #define NUM_FEATURE_BITS 64 #define BTRFS_FEATURE_NAME_MAX 13 static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS][BTRFS_FEATURE_NAME_MAX]; static struct btrfs_feature_attr btrfs_feature_attrs[FEAT_MAX][NUM_FEATURE_BITS]; static_assert(ARRAY_SIZE(btrfs_unknown_feature_names) == ARRAY_SIZE(btrfs_feature_attrs)); static_assert(ARRAY_SIZE(btrfs_unknown_feature_names[0]) == ARRAY_SIZE(btrfs_feature_attrs[0])); static const u64 supported_feature_masks[FEAT_MAX] = { [FEAT_COMPAT] = BTRFS_FEATURE_COMPAT_SUPP, [FEAT_COMPAT_RO] = BTRFS_FEATURE_COMPAT_RO_SUPP, [FEAT_INCOMPAT] = BTRFS_FEATURE_INCOMPAT_SUPP, }; static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add) { int set; for (set = 0; set < FEAT_MAX; set++) { int i; struct attribute *attrs[2]; struct attribute_group agroup = { .name = "features", .attrs = attrs, }; u64 features = get_features(fs_info, set); features &= ~supported_feature_masks[set]; if (!features) continue; attrs[1] = NULL; for (i = 0; i < NUM_FEATURE_BITS; i++) { struct btrfs_feature_attr *fa; if (!(features & (1ULL << i))) continue; fa = &btrfs_feature_attrs[set][i]; attrs[0] = &fa->kobj_attr.attr; if (add) { int ret; ret = sysfs_merge_group(&fs_info->fs_devices->fsid_kobj, &agroup); if (ret) return ret; } else sysfs_unmerge_group(&fs_info->fs_devices->fsid_kobj, &agroup); } } return 0; } static void __btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs) { if (fs_devs->devinfo_kobj) { kobject_del(fs_devs->devinfo_kobj); kobject_put(fs_devs->devinfo_kobj); fs_devs->devinfo_kobj = NULL; } if (fs_devs->devices_kobj) { kobject_del(fs_devs->devices_kobj); kobject_put(fs_devs->devices_kobj); fs_devs->devices_kobj = NULL; } if (fs_devs->fsid_kobj.state_initialized) { kobject_del(&fs_devs->fsid_kobj); kobject_put(&fs_devs->fsid_kobj); wait_for_completion(&fs_devs->kobj_unregister); } } /* when fs_devs is NULL it will remove all fsid kobject */ void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs) { struct list_head *fs_uuids = btrfs_get_fs_uuids(); if (fs_devs) { __btrfs_sysfs_remove_fsid(fs_devs); return; } list_for_each_entry(fs_devs, fs_uuids, fs_list) { 
__btrfs_sysfs_remove_fsid(fs_devs); } } static void btrfs_sysfs_remove_fs_devices(struct btrfs_fs_devices *fs_devices) { struct btrfs_device *device; struct btrfs_fs_devices *seed; list_for_each_entry(device, &fs_devices->devices, dev_list) btrfs_sysfs_remove_device(device); list_for_each_entry(seed, &fs_devices->seed_list, seed_list) { list_for_each_entry(device, &seed->devices, dev_list) btrfs_sysfs_remove_device(device); } } void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info) { struct kobject *fsid_kobj = &fs_info->fs_devices->fsid_kobj; sysfs_remove_link(fsid_kobj, "bdi"); if (fs_info->space_info_kobj) { sysfs_remove_files(fs_info->space_info_kobj, allocation_attrs); kobject_del(fs_info->space_info_kobj); kobject_put(fs_info->space_info_kobj); } if (fs_info->discard_kobj) { sysfs_remove_files(fs_info->discard_kobj, discard_attrs); kobject_del(fs_info->discard_kobj); kobject_put(fs_info->discard_kobj); } #ifdef CONFIG_BTRFS_DEBUG if (fs_info->debug_kobj) { sysfs_remove_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); kobject_del(fs_info->debug_kobj); kobject_put(fs_info->debug_kobj); } #endif addrm_unknown_feature_attrs(fs_info, false); sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group); sysfs_remove_files(fsid_kobj, btrfs_attrs); btrfs_sysfs_remove_fs_devices(fs_info->fs_devices); } static const char * const btrfs_feature_set_names[FEAT_MAX] = { [FEAT_COMPAT] = "compat", [FEAT_COMPAT_RO] = "compat_ro", [FEAT_INCOMPAT] = "incompat", }; const char *btrfs_feature_set_name(enum btrfs_feature_set set) { return btrfs_feature_set_names[set]; } char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags) { size_t bufsize = 4096; /* safe max, 64 names * 64 bytes */ int len = 0; int i; char *str; str = kmalloc(bufsize, GFP_KERNEL); if (!str) return str; for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) { const char *name; if (!(flags & (1ULL << i))) continue; name = btrfs_feature_attrs[set][i].kobj_attr.attr.name; len += scnprintf(str + len, bufsize - len, "%s%s", len ? 
"," : "", name); } return str; } static void init_feature_attrs(void) { struct btrfs_feature_attr *fa; int set, i; memset(btrfs_feature_attrs, 0, sizeof(btrfs_feature_attrs)); memset(btrfs_unknown_feature_names, 0, sizeof(btrfs_unknown_feature_names)); for (i = 0; btrfs_supported_feature_attrs[i]; i++) { struct btrfs_feature_attr *sfa; struct attribute *a = btrfs_supported_feature_attrs[i]; int bit; sfa = attr_to_btrfs_feature_attr(a); bit = ilog2(sfa->feature_bit); fa = &btrfs_feature_attrs[sfa->feature_set][bit]; fa->kobj_attr.attr.name = sfa->kobj_attr.attr.name; } for (set = 0; set < FEAT_MAX; set++) { for (i = 0; i < ARRAY_SIZE(btrfs_feature_attrs[set]); i++) { char *name = btrfs_unknown_feature_names[set][i]; fa = &btrfs_feature_attrs[set][i]; if (fa->kobj_attr.attr.name) continue; snprintf(name, BTRFS_FEATURE_NAME_MAX, "%s:%u", btrfs_feature_set_names[set], i); fa->kobj_attr.attr.name = name; fa->kobj_attr.attr.mode = S_IRUGO; fa->feature_set = set; fa->feature_bit = 1ULL << i; } } } /* * Create a sysfs entry for a given block group type at path * /sys/fs/btrfs/UUID/allocation/data/TYPE */ void btrfs_sysfs_add_block_group_type(struct btrfs_block_group *cache) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_space_info *space_info = cache->space_info; struct raid_kobject *rkobj; const int index = btrfs_bg_flags_to_raid_index(cache->flags); unsigned int nofs_flag; int ret; /* * Setup a NOFS context because kobject_add(), deep in its call chain, * does GFP_KERNEL allocations, and we are often called in a context * where if reclaim is triggered we can deadlock (we are either holding * a transaction handle or some lock required for a transaction * commit). */ nofs_flag = memalloc_nofs_save(); rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); if (!rkobj) { memalloc_nofs_restore(nofs_flag); btrfs_warn(cache->fs_info, "couldn't alloc memory for raid level kobject"); return; } rkobj->flags = cache->flags; kobject_init(&rkobj->kobj, &btrfs_raid_ktype); /* * We call this either on mount, or if we've created a block group for a * new index type while running (i.e. when restriping). The running * case is tricky because we could race with other threads, so we need * to have this check to make sure we didn't already init the kobject. * * We don't have to protect on the free side because it only happens on * unmount. 
*/ spin_lock(&space_info->lock); if (space_info->block_group_kobjs[index]) { spin_unlock(&space_info->lock); kobject_put(&rkobj->kobj); return; } else { space_info->block_group_kobjs[index] = &rkobj->kobj; } spin_unlock(&space_info->lock); ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", btrfs_bg_type_to_raid_name(rkobj->flags)); memalloc_nofs_restore(nofs_flag); if (ret) { spin_lock(&space_info->lock); space_info->block_group_kobjs[index] = NULL; spin_unlock(&space_info->lock); kobject_put(&rkobj->kobj); btrfs_warn(fs_info, "failed to add kobject for block cache, ignoring"); return; } } /* * Remove sysfs directories for all block group types of a given space info and * the space info as well */ void btrfs_sysfs_remove_space_info(struct btrfs_space_info *space_info) { int i; for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { struct kobject *kobj; kobj = space_info->block_group_kobjs[i]; space_info->block_group_kobjs[i] = NULL; if (kobj) { kobject_del(kobj); kobject_put(kobj); } } kobject_del(&space_info->kobj); kobject_put(&space_info->kobj); } static const char *alloc_name(struct btrfs_space_info *space_info) { u64 flags = space_info->flags; switch (flags) { case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA: return "mixed"; case BTRFS_BLOCK_GROUP_METADATA: switch (space_info->subgroup_id) { case BTRFS_SUB_GROUP_PRIMARY: return "metadata"; case BTRFS_SUB_GROUP_TREELOG: return "metadata-treelog"; default: WARN_ON_ONCE(1); return "metadata (unknown sub-group)"; } case BTRFS_BLOCK_GROUP_DATA: switch (space_info->subgroup_id) { case BTRFS_SUB_GROUP_PRIMARY: return "data"; case BTRFS_SUB_GROUP_DATA_RELOC: return "data-reloc"; default: WARN_ON_ONCE(1); return "data (unknown sub-group)"; } case BTRFS_BLOCK_GROUP_SYSTEM: ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_PRIMARY); return "system"; default: WARN_ON(1); return "invalid-combination"; } } /* * Create a sysfs entry for a space info type at path * /sys/fs/btrfs/UUID/allocation/TYPE */ int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info) { int ret; ret = kobject_init_and_add(&space_info->kobj, &space_info_ktype, fs_info->space_info_kobj, "%s", alloc_name(space_info)); if (ret) { kobject_put(&space_info->kobj); return ret; } return 0; } void btrfs_sysfs_remove_device(struct btrfs_device *device) { struct kobject *devices_kobj; /* * Seed fs_devices devices_kobj aren't used, fetch kobject from the * fs_info::fs_devices. 
*/ devices_kobj = device->fs_info->fs_devices->devices_kobj; ASSERT(devices_kobj); if (device->bdev) sysfs_remove_link(devices_kobj, bdev_kobj(device->bdev)->name); if (device->devid_kobj.state_initialized) { kobject_del(&device->devid_kobj); kobject_put(&device->devid_kobj); wait_for_completion(&device->kobj_unregister); } } static ssize_t btrfs_devinfo_in_fs_metadata_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { int val; struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); val = !!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); return sysfs_emit(buf, "%d\n", val); } BTRFS_ATTR(devid, in_fs_metadata, btrfs_devinfo_in_fs_metadata_show); static ssize_t btrfs_devinfo_missing_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { int val; struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); val = !!test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state); return sysfs_emit(buf, "%d\n", val); } BTRFS_ATTR(devid, missing, btrfs_devinfo_missing_show); static ssize_t btrfs_devinfo_replace_target_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { int val; struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); val = !!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); return sysfs_emit(buf, "%d\n", val); } BTRFS_ATTR(devid, replace_target, btrfs_devinfo_replace_target_show); static ssize_t btrfs_devinfo_scrub_speed_max_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); return sysfs_emit(buf, "%llu\n", READ_ONCE(device->scrub_speed_max)); } static ssize_t btrfs_devinfo_scrub_speed_max_store(struct kobject *kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); char *endptr; unsigned long long limit; limit = memparse(buf, &endptr); /* There could be trailing '\n', also catch any typos after the value. */ endptr = skip_spaces(endptr); if (*endptr != 0) return -EINVAL; WRITE_ONCE(device->scrub_speed_max, limit); return len; } BTRFS_ATTR_RW(devid, scrub_speed_max, btrfs_devinfo_scrub_speed_max_show, btrfs_devinfo_scrub_speed_max_store); static ssize_t btrfs_devinfo_writeable_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { int val; struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); val = !!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state); return sysfs_emit(buf, "%d\n", val); } BTRFS_ATTR(devid, writeable, btrfs_devinfo_writeable_show); static ssize_t btrfs_devinfo_fsid_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); return sysfs_emit(buf, "%pU\n", device->fs_devices->fsid); } BTRFS_ATTR(devid, fsid, btrfs_devinfo_fsid_show); static ssize_t btrfs_devinfo_error_stats_show(struct kobject *kobj, struct kobj_attribute *a, char *buf) { struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); if (!device->dev_stats_valid) return sysfs_emit(buf, "invalid\n"); /* * Print all at once so we get a snapshot of all values from the same * time. Keep them in sync and in order of definition of * btrfs_dev_stat_values. 
*/ return sysfs_emit(buf, "write_errs %d\n" "read_errs %d\n" "flush_errs %d\n" "corruption_errs %d\n" "generation_errs %d\n", btrfs_dev_stat_read(device, BTRFS_DEV_STAT_WRITE_ERRS), btrfs_dev_stat_read(device, BTRFS_DEV_STAT_READ_ERRS), btrfs_dev_stat_read(device, BTRFS_DEV_STAT_FLUSH_ERRS), btrfs_dev_stat_read(device, BTRFS_DEV_STAT_CORRUPTION_ERRS), btrfs_dev_stat_read(device, BTRFS_DEV_STAT_GENERATION_ERRS)); } BTRFS_ATTR(devid, error_stats, btrfs_devinfo_error_stats_show); /* * Information about one device. * * Path: /sys/fs/btrfs/<uuid>/devinfo/<devid>/ */ static struct attribute *devid_attrs[] = { BTRFS_ATTR_PTR(devid, error_stats), BTRFS_ATTR_PTR(devid, fsid), BTRFS_ATTR_PTR(devid, in_fs_metadata), BTRFS_ATTR_PTR(devid, missing), BTRFS_ATTR_PTR(devid, replace_target), BTRFS_ATTR_PTR(devid, scrub_speed_max), BTRFS_ATTR_PTR(devid, writeable), NULL }; ATTRIBUTE_GROUPS(devid); static void btrfs_release_devid_kobj(struct kobject *kobj) { struct btrfs_device *device = container_of(kobj, struct btrfs_device, devid_kobj); memset(&device->devid_kobj, 0, sizeof(struct kobject)); complete(&device->kobj_unregister); } static const struct kobj_type devid_ktype = { .sysfs_ops = &kobj_sysfs_ops, .default_groups = devid_groups, .release = btrfs_release_devid_kobj, }; int btrfs_sysfs_add_device(struct btrfs_device *device) { int ret; unsigned int nofs_flag; struct kobject *devices_kobj; struct kobject *devinfo_kobj; /* * Make sure we use the fs_info::fs_devices to fetch the kobjects even * for the seed fs_devices */ devices_kobj = device->fs_info->fs_devices->devices_kobj; devinfo_kobj = device->fs_info->fs_devices->devinfo_kobj; ASSERT(devices_kobj); ASSERT(devinfo_kobj); nofs_flag = memalloc_nofs_save(); if (device->bdev) { struct kobject *disk_kobj = bdev_kobj(device->bdev); ret = sysfs_create_link(devices_kobj, disk_kobj, disk_kobj->name); if (ret) { btrfs_warn(device->fs_info, "creating sysfs device link for devid %llu failed: %d", device->devid, ret); goto out; } } init_completion(&device->kobj_unregister); ret = kobject_init_and_add(&device->devid_kobj, &devid_ktype, devinfo_kobj, "%llu", device->devid); if (ret) { kobject_put(&device->devid_kobj); btrfs_warn(device->fs_info, "devinfo init for devid %llu failed: %d", device->devid, ret); } out: memalloc_nofs_restore(nofs_flag); return ret; } static int btrfs_sysfs_add_fs_devices(struct btrfs_fs_devices *fs_devices) { int ret; struct btrfs_device *device; struct btrfs_fs_devices *seed; list_for_each_entry(device, &fs_devices->devices, dev_list) { ret = btrfs_sysfs_add_device(device); if (ret) goto fail; } list_for_each_entry(seed, &fs_devices->seed_list, seed_list) { list_for_each_entry(device, &seed->devices, dev_list) { ret = btrfs_sysfs_add_device(device); if (ret) goto fail; } } return 0; fail: btrfs_sysfs_remove_fs_devices(fs_devices); return ret; } void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action) { int ret; ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); if (ret) btrfs_warn(NULL, "sending event %d to kobject: '%s' (%p): failed", action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), &disk_to_dev(bdev->bd_disk)->kobj); } void btrfs_sysfs_update_sprout_fsid(struct btrfs_fs_devices *fs_devices) { char fsid_buf[BTRFS_UUID_UNPARSED_SIZE]; /* * Sprouting changes fsid of the mounted filesystem, rename the fsid * directory */ snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU", fs_devices->fsid); if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf)) btrfs_warn(fs_devices->fs_info, "sysfs: failed 
to create fsid for sprout"); } void btrfs_sysfs_update_devid(struct btrfs_device *device) { char tmp[24]; snprintf(tmp, sizeof(tmp), "%llu", device->devid); if (kobject_rename(&device->devid_kobj, tmp)) btrfs_warn(device->fs_devices->fs_info, "sysfs: failed to update devid for %llu", device->devid); } /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; /* * Creates: * /sys/fs/btrfs/UUID * * Can be called by the device discovery thread. */ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs) { int ret; init_completion(&fs_devs->kobj_unregister); fs_devs->fsid_kobj.kset = btrfs_kset; ret = kobject_init_and_add(&fs_devs->fsid_kobj, &btrfs_ktype, NULL, "%pU", fs_devs->fsid); if (ret) { kobject_put(&fs_devs->fsid_kobj); return ret; } fs_devs->devices_kobj = kobject_create_and_add("devices", &fs_devs->fsid_kobj); if (!fs_devs->devices_kobj) { btrfs_err(fs_devs->fs_info, "failed to init sysfs device interface"); btrfs_sysfs_remove_fsid(fs_devs); return -ENOMEM; } fs_devs->devinfo_kobj = kobject_create_and_add("devinfo", &fs_devs->fsid_kobj); if (!fs_devs->devinfo_kobj) { btrfs_err(fs_devs->fs_info, "failed to init sysfs devinfo kobject"); btrfs_sysfs_remove_fsid(fs_devs); return -ENOMEM; } return 0; } int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info) { int ret; struct btrfs_fs_devices *fs_devs = fs_info->fs_devices; struct kobject *fsid_kobj = &fs_devs->fsid_kobj; ret = btrfs_sysfs_add_fs_devices(fs_devs); if (ret) return ret; ret = sysfs_create_files(fsid_kobj, btrfs_attrs); if (ret) { btrfs_sysfs_remove_fs_devices(fs_devs); return ret; } ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group); if (ret) goto failure; #ifdef CONFIG_BTRFS_DEBUG fs_info->debug_kobj = kobject_create_and_add("debug", fsid_kobj); if (!fs_info->debug_kobj) { ret = -ENOMEM; goto failure; } ret = sysfs_create_files(fs_info->debug_kobj, btrfs_debug_mount_attrs); if (ret) goto failure; #endif /* Discard directory */ fs_info->discard_kobj = kobject_create_and_add("discard", fsid_kobj); if (!fs_info->discard_kobj) { ret = -ENOMEM; goto failure; } ret = sysfs_create_files(fs_info->discard_kobj, discard_attrs); if (ret) goto failure; ret = addrm_unknown_feature_attrs(fs_info, true); if (ret) goto failure; ret = sysfs_create_link(fsid_kobj, &fs_info->sb->s_bdi->dev->kobj, "bdi"); if (ret) goto failure; fs_info->space_info_kobj = kobject_create_and_add("allocation", fsid_kobj); if (!fs_info->space_info_kobj) { ret = -ENOMEM; goto failure; } ret = sysfs_create_files(fs_info->space_info_kobj, allocation_attrs); if (ret) goto failure; return 0; failure: btrfs_sysfs_remove_mounted(fs_info); return ret; } static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent); bool enabled; spin_lock(&fs_info->qgroup_lock); enabled = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON; spin_unlock(&fs_info->qgroup_lock); return sysfs_emit(buf, "%d\n", enabled); } BTRFS_ATTR(qgroups, enabled, qgroup_enabled_show); static ssize_t qgroup_mode_show(struct kobject *qgroups_kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent); ssize_t ret = 0; spin_lock(&fs_info->qgroup_lock); ASSERT(btrfs_qgroup_enabled(fs_info)); switch (btrfs_qgroup_mode(fs_info)) { case BTRFS_QGROUP_MODE_FULL: ret = sysfs_emit(buf, "qgroup\n"); break; case BTRFS_QGROUP_MODE_SIMPLE: ret = sysfs_emit(buf, "squota\n"); break; default: btrfs_warn(fs_info, "unexpected qgroup mode %d\n", 
btrfs_qgroup_mode(fs_info)); break; } spin_unlock(&fs_info->qgroup_lock); return ret; } BTRFS_ATTR(qgroups, mode, qgroup_mode_show); static ssize_t qgroup_inconsistent_show(struct kobject *qgroups_kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent); bool inconsistent; spin_lock(&fs_info->qgroup_lock); inconsistent = (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT); spin_unlock(&fs_info->qgroup_lock); return sysfs_emit(buf, "%d\n", inconsistent); } BTRFS_ATTR(qgroups, inconsistent, qgroup_inconsistent_show); static ssize_t qgroup_drop_subtree_thres_show(struct kobject *qgroups_kobj, struct kobj_attribute *a, char *buf) { struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent); u8 result; spin_lock(&fs_info->qgroup_lock); result = fs_info->qgroup_drop_subtree_thres; spin_unlock(&fs_info->qgroup_lock); return sysfs_emit(buf, "%d\n", result); } static ssize_t qgroup_drop_subtree_thres_store(struct kobject *qgroups_kobj, struct kobj_attribute *a, const char *buf, size_t len) { struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent); u8 new_thres; int ret; ret = kstrtou8(buf, 10, &new_thres); if (ret) return -EINVAL; if (new_thres > BTRFS_MAX_LEVEL) return -EINVAL; spin_lock(&fs_info->qgroup_lock); fs_info->qgroup_drop_subtree_thres = new_thres; spin_unlock(&fs_info->qgroup_lock); return len; } BTRFS_ATTR_RW(qgroups, drop_subtree_threshold, qgroup_drop_subtree_thres_show, qgroup_drop_subtree_thres_store); /* * Qgroups global info * * Path: /sys/fs/btrfs/<uuid>/qgroups/ */ static struct attribute *qgroups_attrs[] = { BTRFS_ATTR_PTR(qgroups, enabled), BTRFS_ATTR_PTR(qgroups, inconsistent), BTRFS_ATTR_PTR(qgroups, drop_subtree_threshold), BTRFS_ATTR_PTR(qgroups, mode), NULL }; ATTRIBUTE_GROUPS(qgroups); static void qgroups_release(struct kobject *kobj) { kfree(kobj); } static const struct kobj_type qgroups_ktype = { .sysfs_ops = &kobj_sysfs_ops, .default_groups = qgroups_groups, .release = qgroups_release, }; static inline struct btrfs_fs_info *qgroup_kobj_to_fs_info(struct kobject *kobj) { return to_fs_info(kobj->parent->parent); } #define QGROUP_ATTR(_member, _show_name) \ static ssize_t btrfs_qgroup_show_##_member(struct kobject *qgroup_kobj, \ struct kobj_attribute *a, \ char *buf) \ { \ struct btrfs_fs_info *fs_info = qgroup_kobj_to_fs_info(qgroup_kobj); \ struct btrfs_qgroup *qgroup = container_of(qgroup_kobj, \ struct btrfs_qgroup, kobj); \ return btrfs_show_u64(&qgroup->_member, &fs_info->qgroup_lock, buf); \ } \ BTRFS_ATTR(qgroup, _show_name, btrfs_qgroup_show_##_member) #define QGROUP_RSV_ATTR(_name, _type) \ static ssize_t btrfs_qgroup_rsv_show_##_name(struct kobject *qgroup_kobj, \ struct kobj_attribute *a, \ char *buf) \ { \ struct btrfs_fs_info *fs_info = qgroup_kobj_to_fs_info(qgroup_kobj); \ struct btrfs_qgroup *qgroup = container_of(qgroup_kobj, \ struct btrfs_qgroup, kobj); \ return btrfs_show_u64(&qgroup->rsv.values[_type], \ &fs_info->qgroup_lock, buf); \ } \ BTRFS_ATTR(qgroup, rsv_##_name, btrfs_qgroup_rsv_show_##_name) QGROUP_ATTR(rfer, referenced); QGROUP_ATTR(excl, exclusive); QGROUP_ATTR(max_rfer, max_referenced); QGROUP_ATTR(max_excl, max_exclusive); QGROUP_ATTR(lim_flags, limit_flags); QGROUP_RSV_ATTR(data, BTRFS_QGROUP_RSV_DATA); QGROUP_RSV_ATTR(meta_pertrans, BTRFS_QGROUP_RSV_META_PERTRANS); QGROUP_RSV_ATTR(meta_prealloc, BTRFS_QGROUP_RSV_META_PREALLOC); /* * Qgroup information. 
* * Path: /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid>/ */ static struct attribute *qgroup_attrs[] = { BTRFS_ATTR_PTR(qgroup, referenced), BTRFS_ATTR_PTR(qgroup, exclusive), BTRFS_ATTR_PTR(qgroup, max_referenced), BTRFS_ATTR_PTR(qgroup, max_exclusive), BTRFS_ATTR_PTR(qgroup, limit_flags), BTRFS_ATTR_PTR(qgroup, rsv_data), BTRFS_ATTR_PTR(qgroup, rsv_meta_pertrans), BTRFS_ATTR_PTR(qgroup, rsv_meta_prealloc), NULL }; ATTRIBUTE_GROUPS(qgroup); static void qgroup_release(struct kobject *kobj) { struct btrfs_qgroup *qgroup = container_of(kobj, struct btrfs_qgroup, kobj); memset(&qgroup->kobj, 0, sizeof(*kobj)); } static const struct kobj_type qgroup_ktype = { .sysfs_ops = &kobj_sysfs_ops, .release = qgroup_release, .default_groups = qgroup_groups, }; int btrfs_sysfs_add_one_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) { struct kobject *qgroups_kobj = fs_info->qgroups_kobj; int ret; if (btrfs_is_testing(fs_info)) return 0; if (qgroup->kobj.state_initialized) return 0; if (!qgroups_kobj) return -EINVAL; ret = kobject_init_and_add(&qgroup->kobj, &qgroup_ktype, qgroups_kobj, "%hu_%llu", btrfs_qgroup_level(qgroup->qgroupid), btrfs_qgroup_subvolid(qgroup->qgroupid)); if (ret < 0) kobject_put(&qgroup->kobj); return ret; } void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info) { struct btrfs_qgroup *qgroup; struct btrfs_qgroup *next; if (btrfs_is_testing(fs_info)) return; rbtree_postorder_for_each_entry_safe(qgroup, next, &fs_info->qgroup_tree, node) btrfs_sysfs_del_one_qgroup(fs_info, qgroup); if (fs_info->qgroups_kobj) { kobject_del(fs_info->qgroups_kobj); kobject_put(fs_info->qgroups_kobj); fs_info->qgroups_kobj = NULL; } } /* Called when qgroups get initialized, thus there is no need for locking */ int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info) { struct kobject *fsid_kobj = &fs_info->fs_devices->fsid_kobj; struct btrfs_qgroup *qgroup; struct btrfs_qgroup *next; int ret = 0; if (btrfs_is_testing(fs_info)) return 0; ASSERT(fsid_kobj); if (fs_info->qgroups_kobj) return 0; fs_info->qgroups_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (!fs_info->qgroups_kobj) return -ENOMEM; ret = kobject_init_and_add(fs_info->qgroups_kobj, &qgroups_ktype, fsid_kobj, "qgroups"); if (ret < 0) goto out; rbtree_postorder_for_each_entry_safe(qgroup, next, &fs_info->qgroup_tree, node) { ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup); if (ret < 0) goto out; } out: if (ret < 0) btrfs_sysfs_del_qgroups(fs_info); return ret; } void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup) { if (btrfs_is_testing(fs_info)) return; if (qgroup->kobj.state_initialized) { kobject_del(&qgroup->kobj); kobject_put(&qgroup->kobj); } } /* * Change per-fs features in /sys/fs/btrfs/UUID/features to match current * values in superblock. 
Call after any changes to incompat/compat_ro flags */ void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info) { struct kobject *fsid_kobj; int ret; if (!fs_info) return; fsid_kobj = &fs_info->fs_devices->fsid_kobj; if (!fsid_kobj->state_initialized) return; ret = sysfs_update_group(fsid_kobj, &btrfs_feature_attr_group); if (ret < 0) btrfs_warn(fs_info, "failed to update /sys/fs/btrfs/%pU/features: %d", fs_info->fs_devices->fsid, ret); } int __init btrfs_init_sysfs(void) { int ret; btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); if (!btrfs_kset) return -ENOMEM; init_feature_attrs(); ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); if (ret) goto out2; ret = sysfs_merge_group(&btrfs_kset->kobj, &btrfs_static_feature_attr_group); if (ret) goto out_remove_group; #ifdef CONFIG_BTRFS_DEBUG ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); if (ret) { sysfs_unmerge_group(&btrfs_kset->kobj, &btrfs_static_feature_attr_group); goto out_remove_group; } #endif return 0; out_remove_group: sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); out2: kset_unregister(btrfs_kset); return ret; } void __cold btrfs_exit_sysfs(void) { sysfs_unmerge_group(&btrfs_kset->kobj, &btrfs_static_feature_attr_group); sysfs_remove_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); #ifdef CONFIG_BTRFS_DEBUG sysfs_remove_group(&btrfs_kset->kobj, &btrfs_debug_feature_attr_group); #endif kset_unregister(btrfs_kset); } |
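The BTRFS_ATTR(), BTRFS_ATTR_RW() and BTRFS_ATTR_PTR() helpers used throughout the file above are defined in the btrfs sysfs header, which is not reproduced here. For reference only, below is a minimal, self-contained sketch of the same show/store attribute pattern built directly on the generic kobject/sysfs API; the names demo_value, demo_attr and demo_kobj are hypothetical and do not appear in the original sources.

/*
 * Illustrative sketch (not part of fs/btrfs/sysfs.c): one read-write sysfs
 * attribute exposed under /sys/kernel/demo/, using the same kobj_attribute +
 * attribute_group pattern as the btrfs code above.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/module.h>

static unsigned long demo_value;

static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	/* sysfs_emit() bounds the output to a single PAGE_SIZE buffer. */
	return sysfs_emit(buf, "%lu\n", demo_value);
}

static ssize_t demo_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t len)
{
	int ret = kstrtoul(buf, 10, &demo_value);

	/* On success report the whole write as consumed, as the code above does. */
	return ret ? ret : len;
}

/* Roughly what a BTRFS_ATTR_RW()-style helper declares for one attribute. */
static struct kobj_attribute demo_attr = __ATTR(demo, 0644, demo_show, demo_store);

static struct attribute *demo_attrs[] = {
	&demo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);

static struct kobject *demo_kobj;

static int __init demo_init(void)
{
	int ret;

	/* Creates /sys/kernel/demo/demo, analogous to the /sys/fs/btrfs/ files. */
	demo_kobj = kobject_create_and_add("demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	ret = sysfs_create_groups(demo_kobj, demo_groups);
	if (ret)
		kobject_put(demo_kobj);
	return ret;
}

static void __exit demo_exit(void)
{
	sysfs_remove_groups(demo_kobj, demo_groups);
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");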
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006, Thomas Gleixner, Russell King * * This file contains the core interrupt handling code, for irq-chip based * architectures. 
Detailed information is available in * Documentation/core-api/genericirq.rst */ #include <linux/irq.h> #include <linux/msi.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/irqdomain.h> #include <trace/events/irq.h> #include "internals.h" static irqreturn_t bad_chained_irq(int irq, void *dev_id) { WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); return IRQ_NONE; } /* * Chained handlers should never call action on their IRQ. This default * action will emit warning if such thing happens. */ struct irqaction chained_action = { .handler = bad_chained_irq, }; /** * irq_set_chip - set the irq chip for an irq * @irq: irq number * @chip: pointer to irq chip description structure */ int irq_set_chip(unsigned int irq, const struct irq_chip *chip) { int ret = -EINVAL; scoped_irqdesc_get_and_lock(irq, 0) { scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); ret = 0; } /* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */ if (!ret) irq_mark_irq(irq); return ret; } EXPORT_SYMBOL(irq_set_chip); /** * irq_set_irq_type - set the irq trigger type for an irq * @irq: irq number * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h */ int irq_set_irq_type(unsigned int irq, unsigned int type) { scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) return __irq_set_trigger(scoped_irqdesc, type); return -EINVAL; } EXPORT_SYMBOL(irq_set_irq_type); /** * irq_set_handler_data - set irq handler data for an irq * @irq: Interrupt number * @data: Pointer to interrupt specific data * * Set the hardware irq controller data for an irq */ int irq_set_handler_data(unsigned int irq, void *data) { scoped_irqdesc_get_and_lock(irq, 0) { scoped_irqdesc->irq_common_data.handler_data = data; return 0; } return -EINVAL; } EXPORT_SYMBOL(irq_set_handler_data); /** * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset * @irq_base: Interrupt number base * @irq_offset: Interrupt number offset * @entry: Pointer to MSI descriptor data * * Set the MSI descriptor entry for an irq at offset */ int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry) { scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) { scoped_irqdesc->irq_common_data.msi_desc = entry; if (entry && !irq_offset) entry->irq = irq_base; return 0; } return -EINVAL; } /** * irq_set_msi_desc - set MSI descriptor data for an irq * @irq: Interrupt number * @entry: Pointer to MSI descriptor data * * Set the MSI descriptor entry for an irq */ int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) { return irq_set_msi_desc_off(irq, 0, entry); } /** * irq_set_chip_data - set irq chip data for an irq * @irq: Interrupt number * @data: Pointer to chip specific data * * Set the hardware irq chip data for an irq */ int irq_set_chip_data(unsigned int irq, void *data) { scoped_irqdesc_get_and_lock(irq, 0) { scoped_irqdesc->irq_data.chip_data = data; return 0; } return -EINVAL; } EXPORT_SYMBOL(irq_set_chip_data); struct irq_data *irq_get_irq_data(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); return desc ? 
&desc->irq_data : NULL; } EXPORT_SYMBOL_GPL(irq_get_irq_data); static void irq_state_clr_disabled(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); } static void irq_state_clr_masked(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); } static void irq_state_clr_started(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); } static void irq_state_set_started(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); } enum { IRQ_STARTUP_NORMAL, IRQ_STARTUP_MANAGED, IRQ_STARTUP_ABORT, }; #ifdef CONFIG_SMP static int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, bool force) { struct irq_data *d = irq_desc_get_irq_data(desc); if (!irqd_affinity_is_managed(d)) return IRQ_STARTUP_NORMAL; irqd_clr_managed_shutdown(d); if (!cpumask_intersects(aff, cpu_online_mask)) { /* * Catch code which fiddles with enable_irq() on a managed * and potentially shutdown IRQ. Chained interrupt * installment or irq auto probing should not happen on * managed irqs either. */ if (WARN_ON_ONCE(force)) return IRQ_STARTUP_ABORT; /* * The interrupt was requested, but there is no online CPU * in it's affinity mask. Put it into managed shutdown * state and let the cpu hotplug mechanism start it up once * a CPU in the mask becomes available. */ return IRQ_STARTUP_ABORT; } /* * Managed interrupts have reserved resources, so this should not * happen. */ if (WARN_ON(irq_domain_activate_irq(d, false))) return IRQ_STARTUP_ABORT; return IRQ_STARTUP_MANAGED; } void irq_startup_managed(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); /* * Clear managed-shutdown flag, so we don't repeat managed-startup for * multiple hotplugs, and cause imbalanced disable depth. */ irqd_clr_managed_shutdown(d); /* * Only start it up when the disable depth is 1, so that a disable, * hotunplug, hotplug sequence does not end up enabling it during * hotplug unconditionally. 
*/ desc->depth--; if (!desc->depth) irq_startup(desc, IRQ_RESEND, IRQ_START_COND); } #else static __always_inline int __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff, bool force) { return IRQ_STARTUP_NORMAL; } #endif static void irq_enable(struct irq_desc *desc) { if (!irqd_irq_disabled(&desc->irq_data)) { unmask_irq(desc); } else { irq_state_clr_disabled(desc); if (desc->irq_data.chip->irq_enable) { desc->irq_data.chip->irq_enable(&desc->irq_data); irq_state_clr_masked(desc); } else { unmask_irq(desc); } } } static int __irq_startup(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); int ret = 0; /* Warn if this interrupt is not activated but try nevertheless */ WARN_ON_ONCE(!irqd_is_activated(d)); if (d->chip->irq_startup) { ret = d->chip->irq_startup(d); irq_state_clr_disabled(desc); irq_state_clr_masked(desc); } else { irq_enable(desc); } irq_state_set_started(desc); return ret; } int irq_startup(struct irq_desc *desc, bool resend, bool force) { struct irq_data *d = irq_desc_get_irq_data(desc); const struct cpumask *aff = irq_data_get_affinity_mask(d); int ret = 0; desc->depth = 0; if (irqd_is_started(d)) { irq_enable(desc); } else { switch (__irq_startup_managed(desc, aff, force)) { case IRQ_STARTUP_NORMAL: if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) irq_setup_affinity(desc); ret = __irq_startup(desc); if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) irq_setup_affinity(desc); break; case IRQ_STARTUP_MANAGED: irq_do_set_affinity(d, aff, false); ret = __irq_startup(desc); break; case IRQ_STARTUP_ABORT: desc->depth = 1; irqd_set_managed_shutdown(d); return 0; } } if (resend) check_irq_resend(desc, false); return ret; } int irq_activate(struct irq_desc *desc) { struct irq_data *d = irq_desc_get_irq_data(desc); if (!irqd_affinity_is_managed(d)) return irq_domain_activate_irq(d, false); return 0; } int irq_activate_and_startup(struct irq_desc *desc, bool resend) { if (WARN_ON(irq_activate(desc))) return 0; return irq_startup(desc, resend, IRQ_START_FORCE); } static void __irq_disable(struct irq_desc *desc, bool mask); void irq_shutdown(struct irq_desc *desc) { if (irqd_is_started(&desc->irq_data)) { clear_irq_resend(desc); /* * Increment disable depth, so that a managed shutdown on * CPU hotunplug preserves the actual disabled state when the * CPU comes back online. See irq_startup_managed(). */ desc->depth++; if (desc->irq_data.chip->irq_shutdown) { desc->irq_data.chip->irq_shutdown(&desc->irq_data); irq_state_set_disabled(desc); irq_state_set_masked(desc); } else { __irq_disable(desc, true); } irq_state_clr_started(desc); } } void irq_shutdown_and_deactivate(struct irq_desc *desc) { irq_shutdown(desc); /* * This must be called even if the interrupt was never started up, * because the activation can happen before the interrupt is * available for request/startup. It has it's own state tracking so * it's safe to call it unconditionally. */ irq_domain_deactivate_irq(&desc->irq_data); } static void __irq_disable(struct irq_desc *desc, bool mask) { if (irqd_irq_disabled(&desc->irq_data)) { if (mask) mask_irq(desc); } else { irq_state_set_disabled(desc); if (desc->irq_data.chip->irq_disable) { desc->irq_data.chip->irq_disable(&desc->irq_data); irq_state_set_masked(desc); } else if (mask) { mask_irq(desc); } } } /** * irq_disable - Mark interrupt disabled * @desc: irq descriptor which should be disabled * * If the chip does not implement the irq_disable callback, we * use a lazy disable approach. 
That means we mark the interrupt * disabled, but leave the hardware unmasked. That's an * optimization because we avoid the hardware access for the * common case where no interrupt happens after we marked it * disabled. If an interrupt happens, then the interrupt flow * handler masks the line at the hardware level and marks it * pending. * * If the interrupt chip does not implement the irq_disable callback, * a driver can disable the lazy approach for a particular irq line by * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can * be used for devices which cannot disable the interrupt at the * device level under certain circumstances and have to use * disable_irq[_nosync] instead. */ void irq_disable(struct irq_desc *desc) { __irq_disable(desc, irq_settings_disable_unlazy(desc)); } void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) { if (desc->irq_data.chip->irq_enable) desc->irq_data.chip->irq_enable(&desc->irq_data); else desc->irq_data.chip->irq_unmask(&desc->irq_data); cpumask_set_cpu(cpu, desc->percpu_enabled); } void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) { if (desc->irq_data.chip->irq_disable) desc->irq_data.chip->irq_disable(&desc->irq_data); else desc->irq_data.chip->irq_mask(&desc->irq_data); cpumask_clear_cpu(cpu, desc->percpu_enabled); } static inline void mask_ack_irq(struct irq_desc *desc) { if (desc->irq_data.chip->irq_mask_ack) { desc->irq_data.chip->irq_mask_ack(&desc->irq_data); irq_state_set_masked(desc); } else { mask_irq(desc); if (desc->irq_data.chip->irq_ack) desc->irq_data.chip->irq_ack(&desc->irq_data); } } void mask_irq(struct irq_desc *desc) { if (irqd_irq_masked(&desc->irq_data)) return; if (desc->irq_data.chip->irq_mask) { desc->irq_data.chip->irq_mask(&desc->irq_data); irq_state_set_masked(desc); } } void unmask_irq(struct irq_desc *desc) { if (!irqd_irq_masked(&desc->irq_data)) return; if (desc->irq_data.chip->irq_unmask) { desc->irq_data.chip->irq_unmask(&desc->irq_data); irq_state_clr_masked(desc); } } void unmask_threaded_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; if (chip->flags & IRQCHIP_EOI_THREADED) chip->irq_eoi(&desc->irq_data); unmask_irq(desc); } /* Busy wait until INPROGRESS is cleared */ static bool irq_wait_on_inprogress(struct irq_desc *desc) { if (IS_ENABLED(CONFIG_SMP)) { do { raw_spin_unlock(&desc->lock); while (irqd_irq_inprogress(&desc->irq_data)) cpu_relax(); raw_spin_lock(&desc->lock); } while (irqd_irq_inprogress(&desc->irq_data)); /* Might have been disabled in meantime */ return !irqd_irq_disabled(&desc->irq_data) && desc->action; } return false; } static bool irq_can_handle_pm(struct irq_desc *desc) { struct irq_data *irqd = &desc->irq_data; const struct cpumask *aff; /* * If the interrupt is not in progress and is not an armed * wakeup interrupt, proceed. */ if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED)) return true; /* * If the interrupt is an armed wakeup source, mark it pending * and suspended, disable it and notify the pm core about the * event. 
*/ if (unlikely(irqd_has_set(irqd, IRQD_WAKEUP_ARMED))) { irq_pm_handle_wakeup(desc); return false; } /* Check whether the interrupt is polled on another CPU */ if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) { if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n", smp_processor_id(), desc->irq_data.irq)) return false; return irq_wait_on_inprogress(desc); } /* The below works only for single target interrupts */ if (!IS_ENABLED(CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK) || !irqd_is_single_target(irqd) || desc->handle_irq != handle_edge_irq) return false; /* * If the interrupt affinity was moved to this CPU and the * interrupt is currently handled on the previous target CPU, then * busy wait for INPROGRESS to be cleared. Otherwise for edge type * interrupts the handler might get stuck on the previous target: * * CPU 0 CPU 1 (new target) * handle_edge_irq() * repeat: * handle_event() handle_edge_irq() * if (INPROGESS) { * set(PENDING); * mask(); * return; * } * if (PENDING) { * clear(PENDING); * unmask(); * goto repeat; * } * * This happens when the device raises interrupts with a high rate * and always before handle_event() completes and the CPU0 handler * can clear INPROGRESS. This has been observed in virtual machines. */ aff = irq_data_get_effective_affinity_mask(irqd); if (cpumask_first(aff) != smp_processor_id()) return false; return irq_wait_on_inprogress(desc); } static inline bool irq_can_handle_actions(struct irq_desc *desc) { desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { desc->istate |= IRQS_PENDING; return false; } return true; } static inline bool irq_can_handle(struct irq_desc *desc) { if (!irq_can_handle_pm(desc)) return false; return irq_can_handle_actions(desc); } /** * handle_nested_irq - Handle a nested irq from a irq thread * @irq: the interrupt number * * Handle interrupts which are nested into a threaded interrupt * handler. The handler function is called inside the calling threads * context. */ void handle_nested_irq(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action; irqreturn_t action_ret; might_sleep(); scoped_guard(raw_spinlock_irq, &desc->lock) { if (!irq_can_handle_actions(desc)) return; action = desc->action; kstat_incr_irqs_this_cpu(desc); atomic_inc(&desc->threads_active); } action_ret = IRQ_NONE; for_each_action_of_desc(desc, action) action_ret |= action->thread_fn(action->irq, action->dev_id); if (!irq_settings_no_debug(desc)) note_interrupt(desc, action_ret); wake_threads_waitq(desc); } EXPORT_SYMBOL_GPL(handle_nested_irq); /** * handle_simple_irq - Simple and software-decoded IRQs. * @desc: the interrupt description structure for this irq * * Simple interrupts are either sent from a demultiplexing interrupt * handler or come from hardware, where no interrupt hardware control is * necessary. * * Note: The caller is expected to handle the ack, clear, mask and unmask * issues if necessary. */ void handle_simple_irq(struct irq_desc *desc) { guard(raw_spinlock)(&desc->lock); if (!irq_can_handle_pm(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data)) desc->istate |= IRQS_PENDING; return; } if (!irq_can_handle_actions(desc)) return; kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); } EXPORT_SYMBOL_GPL(handle_simple_irq); /** * handle_untracked_irq - Simple and software-decoded IRQs. 
* @desc: the interrupt description structure for this irq * * Untracked interrupts are sent from a demultiplexing interrupt handler * when the demultiplexer does not know which device it its multiplexed irq * domain generated the interrupt. IRQ's handled through here are not * subjected to stats tracking, randomness, or spurious interrupt * detection. * * Note: Like handle_simple_irq, the caller is expected to handle the ack, * clear, mask and unmask issues if necessary. */ void handle_untracked_irq(struct irq_desc *desc) { scoped_guard(raw_spinlock, &desc->lock) { if (!irq_can_handle(desc)) return; desc->istate &= ~IRQS_PENDING; irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); } __handle_irq_event_percpu(desc); scoped_guard(raw_spinlock, &desc->lock) irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); } EXPORT_SYMBOL_GPL(handle_untracked_irq); /* * Called unconditionally from handle_level_irq() and only for oneshot * interrupts from handle_fasteoi_irq() */ static void cond_unmask_irq(struct irq_desc *desc) { /* * We need to unmask in the following cases: * - Standard level irq (IRQF_ONESHOT is not set) * - Oneshot irq which did not wake the thread (caused by a * spurious interrupt or a primary handler handling it * completely). */ if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) unmask_irq(desc); } /** * handle_level_irq - Level type irq handler * @desc: the interrupt description structure for this irq * * Level type interrupts are active as long as the hardware line has the * active level. This may require to mask the interrupt and unmask it after * the associated handler has acknowledged the device, so the interrupt * line is back to inactive. */ void handle_level_irq(struct irq_desc *desc) { guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); if (!irq_can_handle(desc)) return; kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); cond_unmask_irq(desc); } EXPORT_SYMBOL_GPL(handle_level_irq); static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) { if (!(desc->istate & IRQS_ONESHOT)) { chip->irq_eoi(&desc->irq_data); return; } /* * We need to unmask in the following cases: * - Oneshot irq which did not wake the thread (caused by a * spurious interrupt or a primary handler handling it * completely). */ if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { chip->irq_eoi(&desc->irq_data); unmask_irq(desc); } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { chip->irq_eoi(&desc->irq_data); } } static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data) { if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) chip->irq_eoi(data); } /** * handle_fasteoi_irq - irq handler for transparent controllers * @desc: the interrupt description structure for this irq * * Only a single callback will be issued to the chip: an ->eoi() call when * the interrupt has been serviced. This enables support for modern forms * of interrupt handlers, which handle the flow details in hardware, * transparently. */ void handle_fasteoi_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; guard(raw_spinlock)(&desc->lock); /* * When an affinity change races with IRQ handling, the next interrupt * can arrive on the new CPU before the original CPU has completed * handling the previous one - it may need to be resent. 
*/ if (!irq_can_handle_pm(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data)) desc->istate |= IRQS_PENDING; cond_eoi_irq(chip, &desc->irq_data); return; } if (!irq_can_handle_actions(desc)) { mask_irq(desc); cond_eoi_irq(chip, &desc->irq_data); return; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); /* * When the race described above happens this will resend the interrupt. */ if (unlikely(desc->istate & IRQS_PENDING)) check_irq_resend(desc, false); } EXPORT_SYMBOL_GPL(handle_fasteoi_irq); /** * handle_fasteoi_nmi - irq handler for NMI interrupt lines * @desc: the interrupt description structure for this irq * * A simple NMI-safe handler, considering the restrictions * from request_nmi. * * Only a single callback will be issued to the chip: an ->eoi() * call when the interrupt has been serviced. This enables support * for modern forms of interrupt handlers, which handle the flow * details in hardware, transparently. */ void handle_fasteoi_nmi(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; __kstat_incr_irqs_this_cpu(desc); trace_irq_handler_entry(irq, action); /* * NMIs cannot be shared, there is only one action. */ res = action->handler(irq, action->dev_id); trace_irq_handler_exit(irq, action, res); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); /** * handle_edge_irq - edge type IRQ handler * @desc: the interrupt description structure for this irq * * Interrupt occurs on the falling and/or rising edge of a hardware * signal. The occurrence is latched into the irq controller hardware and * must be acked in order to be reenabled. After the ack another interrupt * can happen on the same source even before the first one is handled by * the associated event handler. If this happens it might be necessary to * disable (mask) the interrupt depending on the controller hardware. This * requires to reenable the interrupt inside of the loop which handles the * interrupts which have arrived while the handler was running. If all * pending interrupts are handled, the loop is left. */ void handle_edge_irq(struct irq_desc *desc) { guard(raw_spinlock)(&desc->lock); if (!irq_can_handle(desc)) { desc->istate |= IRQS_PENDING; mask_ack_irq(desc); return; } kstat_incr_irqs_this_cpu(desc); /* Start handling the irq */ desc->irq_data.chip->irq_ack(&desc->irq_data); do { if (unlikely(!desc->action)) { mask_irq(desc); return; } /* * When another irq arrived while we were handling * one, we could have masked the irq. * Reenable it, if it was not disabled in meantime. */ if (unlikely(desc->istate & IRQS_PENDING)) { if (!irqd_irq_disabled(&desc->irq_data) && irqd_irq_masked(&desc->irq_data)) unmask_irq(desc); } handle_irq_event(desc); } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); } EXPORT_SYMBOL(handle_edge_irq); /** * handle_percpu_irq - Per CPU local irq handler * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements */ void handle_percpu_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); /* * PER CPU interrupts are not serialized. Do not touch * desc->tot_count. 
*/ __kstat_incr_irqs_this_cpu(desc); if (chip->irq_ack) chip->irq_ack(&desc->irq_data); handle_irq_event_percpu(desc); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } /** * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids * @desc: the interrupt description structure for this irq * * Per CPU interrupts on SMP machines without locking requirements. Same as * handle_percpu_irq() above but with the following extras: * * action->percpu_dev_id is a pointer to percpu variables which * contain the real device id for the cpu on which this handler is * called */ void handle_percpu_devid_irq(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; /* * PER CPU interrupts are not serialized. Do not touch * desc->tot_count. */ __kstat_incr_irqs_this_cpu(desc); if (chip->irq_ack) chip->irq_ack(&desc->irq_data); if (likely(action)) { trace_irq_handler_entry(irq, action); res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); trace_irq_handler_exit(irq, action, res); } else { unsigned int cpu = smp_processor_id(); bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); if (enabled) irq_percpu_disable(desc, cpu); pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", enabled ? " and unmasked" : "", irq, cpu); } if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } /** * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu * dev ids * @desc: the interrupt description structure for this irq * * Similar to handle_fasteoi_nmi, but handling the dev_id cookie * as a percpu pointer. */ void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct irqaction *action = desc->action; unsigned int irq = irq_desc_get_irq(desc); irqreturn_t res; __kstat_incr_irqs_this_cpu(desc); trace_irq_handler_entry(irq, action); res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); trace_irq_handler_exit(irq, action, res); if (chip->irq_eoi) chip->irq_eoi(&desc->irq_data); } static void __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained, const char *name) { if (!handle) { handle = handle_bad_irq; } else { struct irq_data *irq_data = &desc->irq_data; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* * With hierarchical domains we might run into a * situation where the outermost chip is not yet set * up, but the inner chips are there. Instead of * bailing we install the handler, but obviously we * cannot enable/startup the interrupt at this point. */ while (irq_data) { if (irq_data->chip != &no_irq_chip) break; /* * Bail out if the outer chip is not set up * and the interrupt supposed to be started * right away. */ if (WARN_ON(is_chained)) return; /* Try the parent */ irq_data = irq_data->parent_data; } #endif if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) return; } /* Uninstall? */ if (handle == handle_bad_irq) { if (desc->irq_data.chip != &no_irq_chip) mask_ack_irq(desc); irq_state_set_disabled(desc); if (is_chained) { desc->action = NULL; WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc))); } desc->depth = 1; } desc->handle_irq = handle; desc->name = name; if (handle != handle_bad_irq && is_chained) { unsigned int type = irqd_get_trigger_type(&desc->irq_data); /* * We're about to start this interrupt immediately, * hence the need to set the trigger configuration. 
* But the .set_type callback may have overridden the * flow handler, ignoring that we're dealing with a * chained interrupt. Reset it immediately because we * do know better. */ if (type != IRQ_TYPE_NONE) { __irq_set_trigger(desc, type); desc->handle_irq = handle; } irq_settings_set_noprobe(desc); irq_settings_set_norequest(desc); irq_settings_set_nothread(desc); desc->action = &chained_action; WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc))); irq_activate_and_startup(desc, IRQ_RESEND); } } void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name) { scoped_irqdesc_get_and_lock(irq, 0) __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); } EXPORT_SYMBOL_GPL(__irq_set_handler); void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, void *data) { scoped_irqdesc_get_and_buslock(irq, 0) { struct irq_desc *desc = scoped_irqdesc; desc->irq_common_data.handler_data = data; __irq_do_set_handler(desc, handle, 1, NULL); } } EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); void irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name) { irq_set_chip(irq, chip); __irq_set_handler(irq, handle, 0, name); } EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { scoped_irqdesc_get_and_lock(irq, 0) { struct irq_desc *desc = scoped_irqdesc; unsigned long trigger, tmp; /* * Warn when a driver sets the no autoenable flag on an already * active interrupt. */ WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); irq_settings_clr_and_set(desc, clr, set); trigger = irqd_get_trigger_type(&desc->irq_data); irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | IRQD_TRIGGER_MASK | IRQD_LEVEL); if (irq_settings_has_no_balance_set(desc)) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); if (irq_settings_is_per_cpu(desc)) irqd_set(&desc->irq_data, IRQD_PER_CPU); if (irq_settings_is_level(desc)) irqd_set(&desc->irq_data, IRQD_LEVEL); tmp = irq_settings_get_trigger_mask(desc); if (tmp != IRQ_TYPE_NONE) trigger = tmp; irqd_set(&desc->irq_data, trigger); } } EXPORT_SYMBOL_GPL(irq_modify_status); #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE /** * irq_cpu_online - Invoke all irq_cpu_online functions. * * Iterate through all irqs and invoke the chip.irq_cpu_online() * for each. */ void irq_cpu_online(void) { unsigned int irq; for_each_active_irq(irq) { struct irq_desc *desc = irq_to_desc(irq); struct irq_chip *chip; if (!desc) continue; guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_online && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_online(&desc->irq_data); } } /** * irq_cpu_offline - Invoke all irq_cpu_offline functions. * * Iterate through all irqs and invoke the chip.irq_cpu_offline() * for each. 
*/ void irq_cpu_offline(void) { unsigned int irq; for_each_active_irq(irq) { struct irq_desc *desc = irq_to_desc(irq); struct irq_chip *chip; if (!desc) continue; guard(raw_spinlock_irqsave)(&desc->lock); chip = irq_data_get_irq_chip(&desc->irq_data); if (chip && chip->irq_cpu_offline && (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || !irqd_irq_disabled(&desc->irq_data))) chip->irq_cpu_offline(&desc->irq_data); } } #endif #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS /** * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on * transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip * also needs to have its ->irq_ack() function called. */ void handle_fasteoi_ack_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; guard(raw_spinlock)(&desc->lock); if (!irq_can_handle_pm(desc)) { cond_eoi_irq(chip, &desc->irq_data); return; } if (unlikely(!irq_can_handle_actions(desc))) { mask_irq(desc); cond_eoi_irq(chip, &desc->irq_data); return; } kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT) mask_irq(desc); desc->irq_data.chip->irq_ack(&desc->irq_data); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); } EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); /** * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on * transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip * also needs to have its ->irq_mask_ack() function called. */ void handle_fasteoi_mask_irq(struct irq_desc *desc) { struct irq_chip *chip = desc->irq_data.chip; guard(raw_spinlock)(&desc->lock); mask_ack_irq(desc); if (!irq_can_handle(desc)) { cond_eoi_irq(chip, &desc->irq_data); return; } kstat_incr_irqs_this_cpu(desc); handle_irq_event(desc); cond_unmask_eoi_irq(desc, chip); } EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ /** * irq_chip_set_parent_state - set the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: State to be restored (one of IRQCHIP_STATE_*) * @val: Value corresponding to @which * * Conditional success, if the underlying irqchip does not implement it. */ int irq_chip_set_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool val) { data = data->parent_data; if (!data || !data->chip->irq_set_irqchip_state) return 0; return data->chip->irq_set_irqchip_state(data, which, val); } EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); /** * irq_chip_get_parent_state - get the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: one of IRQCHIP_STATE_* the caller wants to know * @state: a pointer to a boolean where the state is to be stored * * Conditional success, if the underlying irqchip does not implement it. 
*/ int irq_chip_get_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool *state) { data = data->parent_data; if (!data || !data->chip->irq_get_irqchip_state) return 0; return data->chip->irq_get_irqchip_state(data, which, state); } EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); /** * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if * NULL) * @data: Pointer to interrupt specific data */ void irq_chip_enable_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_enable) data->chip->irq_enable(data); else data->chip->irq_unmask(data); } EXPORT_SYMBOL_GPL(irq_chip_enable_parent); /** * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if * NULL) * @data: Pointer to interrupt specific data */ void irq_chip_disable_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_disable) data->chip->irq_disable(data); else data->chip->irq_mask(data); } EXPORT_SYMBOL_GPL(irq_chip_disable_parent); /** * irq_chip_ack_parent - Acknowledge the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_ack_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_ack(data); } EXPORT_SYMBOL_GPL(irq_chip_ack_parent); /** * irq_chip_mask_parent - Mask the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_mask_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_mask(data); } EXPORT_SYMBOL_GPL(irq_chip_mask_parent); /** * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_mask_ack_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_mask_ack(data); } EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); /** * irq_chip_unmask_parent - Unmask the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_unmask_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_unmask(data); } EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); /** * irq_chip_eoi_parent - Invoke EOI on the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_eoi_parent(struct irq_data *data) { data = data->parent_data; data->chip->irq_eoi(data); } EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); /** * irq_chip_set_affinity_parent - Set affinity on the parent interrupt * @data: Pointer to interrupt specific data * @dest: The affinity mask to set * @force: Flag to enforce setting (disable online checks) * * Conditional, as the underlying parent chip might not implement it. */ int irq_chip_set_affinity_parent(struct irq_data *data, const struct cpumask *dest, bool force) { data = data->parent_data; if (data->chip->irq_set_affinity) return data->chip->irq_set_affinity(data, dest, force); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); /** * irq_chip_set_type_parent - Set IRQ type on the parent interrupt * @data: Pointer to interrupt specific data * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h * * Conditional, as the underlying parent chip might not implement it. 
*/ int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) { data = data->parent_data; if (data->chip->irq_set_type) return data->chip->irq_set_type(data, type); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); /** * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware * @data: Pointer to interrupt specific data * * Iterate through the domain hierarchy of the interrupt and check * whether a hw retrigger function exists. If yes, invoke it. */ int irq_chip_retrigger_hierarchy(struct irq_data *data) { for (data = data->parent_data; data; data = data->parent_data) if (data->chip && data->chip->irq_retrigger) return data->chip->irq_retrigger(data); return 0; } EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); /** * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt * @data: Pointer to interrupt specific data * @vcpu_info: The vcpu affinity information */ int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) { data = data->parent_data; if (data->chip->irq_set_vcpu_affinity) return data->chip->irq_set_vcpu_affinity(data, vcpu_info); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); /** * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt * @data: Pointer to interrupt specific data * @on: Whether to set or reset the wake-up capability of this irq * * Conditional, as the underlying parent chip might not implement it. */ int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) { data = data->parent_data; if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) return 0; if (data->chip->irq_set_wake) return data->chip->irq_set_wake(data, on); return -ENOSYS; } EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); /** * irq_chip_request_resources_parent - Request resources on the parent interrupt * @data: Pointer to interrupt specific data */ int irq_chip_request_resources_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_request_resources) return data->chip->irq_request_resources(data); /* no error on missing optional irq_chip::irq_request_resources */ return 0; } EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); /** * irq_chip_release_resources_parent - Release resources on the parent interrupt * @data: Pointer to interrupt specific data */ void irq_chip_release_resources_parent(struct irq_data *data) { data = data->parent_data; if (data->chip->irq_release_resources) data->chip->irq_release_resources(data); } EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); #endif /** * irq_chip_compose_msi_msg - Compose msi message for a irq chip * @data: Pointer to interrupt specific data * @msg: Pointer to the MSI message * * For hierarchical domains we find the first chip in the hierarchy * which implements the irq_compose_msi_msg callback. For non * hierarchical we use the top level chip. */ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct irq_data *pos; for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { if (data->chip && data->chip->irq_compose_msi_msg) pos = data; } if (!pos) return -ENOSYS; pos->chip->irq_compose_msi_msg(pos, msg); return 0; } static struct device *irq_get_pm_device(struct irq_data *data) { if (data->domain) return data->domain->pm_dev; return NULL; } /** * irq_chip_pm_get - Enable power for an IRQ chip * @data: Pointer to interrupt specific data * * Enable the power to the IRQ chip referenced by the interrupt data * structure. 
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_resume_and_get(dev);

	return retval;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function has
 * been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}
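/*
 * Illustrative usage sketch (not part of kernel/irq/chip.c): how a
 * demultiplexing interrupt controller driver typically combines the
 * helpers above.  All names below (demux_chip, demux_cascade, demux_priv,
 * demux_setup, parent_irq, child_irq) are hypothetical, and error
 * handling is omitted.
 */
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

struct demux_priv {
	void __iomem *regs;
};

static struct irq_chip demux_chip = {
	.name = "demux",
	/* a real driver supplies .irq_mask/.irq_unmask/.irq_ack callbacks */
};

/* Flow handler installed on the parent line as a chained handler */
static void demux_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	/*
	 * A real driver fetches its state with irq_desc_get_handler_data(desc),
	 * reads the hardware status and calls generic_handle_irq() for each
	 * pending child line.
	 */
	chained_irq_exit(chip, desc);
}

static void demux_setup(unsigned int parent_irq, unsigned int child_irq,
			struct demux_priv *priv)
{
	/* Child lines get the chip, a flow handler and per-irq chip data */
	irq_set_chip_and_handler_name(child_irq, &demux_chip,
				      handle_level_irq, "demux");
	irq_set_chip_data(child_irq, priv);

	/* The parent line is marked chained; it never runs request_irq() actions */
	irq_set_chained_handler_and_data(parent_irq, demux_cascade, priv);
}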
| 3 3 3 3 3 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 | /* SPDX-License-Identifier: GPL-2.0-only * * Generic bitmap / 8 bpp image bitstreamer for packed pixel framebuffers * * Rewritten by: * Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org) * * Based on previous work of: * Copyright (C) June 1999 James Simmons * Anton Vorontsov <avorontsov@ru.mvista.com> * Pavel Pisa <pisa@cmp.felk.cvut.cz> * Antonino A. Daplas <adaplas@gmail.com> * and others * * NOTES: * * Handles native and foreign byte order on both endians, standard and * reverse pixel order in a byte (<8 BPP), word length of 32/64 bits, * bits per pixel from 1 to the word length. Handles line lengths at byte * granularity while maintaining aligned accesses. * * Optimized routines for word aligned 1, 2, 4 pixel per word for high * bpp modes and 4 pixel at a time operation for low bpp. * * The color image is expected to be one byte per pixel, and values should * not exceed the bitdepth or the pseudo palette (if used). 
*/ #include "fb_draw.h" /* bitmap image iterator, one pixel at a time */ struct fb_bitmap_iter { const u8 *data; unsigned long colors[2]; int width, i; }; static bool fb_bitmap_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_bitmap_iter *iter = iterator; if (iter->i < iter->width) { int bit = ~iter->i & (BITS_PER_BYTE-1); int byte = iter->i++ / BITS_PER_BYTE; *pixels = iter->colors[(iter->data[byte] >> bit) & 1]; return true; } iter->data += BITS_TO_BYTES(iter->width); iter->i = 0; return false; } /* color image iterator, one pixel at a time */ struct fb_color_iter { const u8 *data; const u32 *palette; struct fb_reverse reverse; int shift; int width, i; }; static bool fb_color_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_color_iter *iter = iterator; if (iter->i < iter->width) { unsigned long color = iter->data[iter->i++]; if (iter->palette) color = iter->palette[color]; *pixels = color << iter->shift; if (iter->reverse.pixel) *pixels = fb_reverse_bits_long(*pixels); return true; } iter->data += iter->width; iter->i = 0; return false; } /* bitmap image iterator, 4 pixels at a time */ struct fb_bitmap4x_iter { const u8 *data; u32 fgxcolor, bgcolor; int width, i; const u32 *expand; int bpp; bool top; }; static bool fb_bitmap4x_image(void *iterator, unsigned long *pixels, int *bits) { struct fb_bitmap4x_iter *iter = iterator; u8 data; if (iter->i >= BITS_PER_BYTE/2) { iter->i -= BITS_PER_BYTE/2; iter->top = !iter->top; if (iter->top) data = *iter->data++ >> BITS_PER_BYTE/2; else data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1); } else if (iter->i != 0) { *bits = iter->bpp * iter->i; if (iter->top) data = iter->data[-1] & ((1 << BITS_PER_BYTE/2)-1); else data = *iter->data++ >> BITS_PER_BYTE/2; #ifndef __LITTLE_ENDIAN data >>= BITS_PER_BYTE/2 - iter->i; #endif iter->i = 0; } else { *bits = iter->bpp * BITS_PER_BYTE/2; iter->i = iter->width; iter->top = false; return false; } *pixels = (iter->fgxcolor & iter->expand[data]) ^ iter->bgcolor; #ifndef __LITTLE_ENDIAN *pixels <<= BITS_PER_LONG - *bits; #endif return true; } /* draw a line a group of pixels at a time */ static __always_inline void fb_bitblit(bool (*get)(void *iter, unsigned long *pixels, int *bits), void *iter, int bits, struct fb_address *dst, struct fb_reverse reverse) { unsigned long pixels, val, mask, old; int offset = 0; int shift = dst->bits; if (shift) { old = fb_read_offset(0, dst); val = fb_reverse_long(old, reverse); val &= ~fb_right(~0UL, shift); } else { old = 0; val = 0; } while (get(iter, &pixels, &bits)) { val |= fb_right(pixels, shift); shift += bits; if (shift < BITS_PER_LONG) continue; val = fb_reverse_long(val, reverse); fb_write_offset(val, offset++, dst); shift &= BITS_PER_LONG - 1; val = !shift ? 
0 : fb_left(pixels, bits - shift); } if (shift) { mask = ~fb_pixel_mask(shift, reverse); val = fb_reverse_long(val, reverse); if (offset || !dst->bits) old = fb_read_offset(offset, dst); fb_write_offset(fb_comp(val, old, mask), offset, dst); } } /* draw a color image a pixel at a time */ static inline void fb_color_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned int bits_per_line, const u32 *palette, int bpp, struct fb_reverse reverse) { struct fb_color_iter iter; u32 height; iter.data = (const u8 *)image->data; iter.palette = palette; iter.reverse = reverse; #ifdef __LITTLE_ENDIAN if (reverse.pixel) iter.shift = BITS_PER_BYTE - bpp; else iter.shift = 0; #else if (reverse.pixel) iter.shift = BITS_PER_LONG - BITS_PER_BYTE; else iter.shift = BITS_PER_LONG - bpp; #endif iter.width = image->width; iter.i = 0; height = image->height; while (height--) { fb_bitblit(fb_color_image, &iter, bpp, dst, reverse); fb_address_forward(dst, bits_per_line); } } #ifdef __LITTLE_ENDIAN #define FB_GEN(a, b) (((a)/8+(((a)&4)<<((b)-2)) \ +(((a)&2)<<((b)*2-1))+(((a)&1u)<<((b)*3)))*((1<<(b))-1)) #define FB_GEN1(a) ((a)/8+((a)&4)/2+((a)&2)*2+((a)&1)*8) #else #define FB_GEN(a, b) (((((a)/8)<<((b)*3))+(((a)&4)<<((b)*2-2)) \ +(((a)&2)<<(b-1))+((a)&1u))*((1<<(b))-1)) #define FB_GEN1(a) (a) #endif #define FB_GENx(a) { FB_GEN(0, (a)), FB_GEN(1, (a)), FB_GEN(2, (a)), FB_GEN(3, (a)), \ FB_GEN(4, (a)), FB_GEN(5, (a)), FB_GEN(6, (a)), FB_GEN(7, (a)), \ FB_GEN(8, (a)), FB_GEN(9, (a)), FB_GEN(10, (a)), FB_GEN(11, (a)), \ FB_GEN(12, (a)), FB_GEN(13, (a)), FB_GEN(14, (a)), FB_GEN(15, (a)) } /* draw a 2 color image four pixels at a time (for 1-8 bpp only) */ static inline void fb_bitmap4x_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int bpp, unsigned int bits_per_line, struct fb_reverse reverse) { static const u32 mul[BITS_PER_BYTE] = { 0xf, 0x55, 0x249, 0x1111, 0x8421, 0x41041, 0x204081, 0x01010101 }; static const u32 expand[BITS_PER_BYTE][1 << 4] = { { FB_GEN1(0), FB_GEN1(1), FB_GEN1(2), FB_GEN1(3), FB_GEN1(4), FB_GEN1(5), FB_GEN1(6), FB_GEN1(7), FB_GEN1(8), FB_GEN1(9), FB_GEN1(10), FB_GEN1(11), FB_GEN1(12), FB_GEN1(13), FB_GEN1(14), FB_GEN1(15) }, FB_GENx(2), FB_GENx(3), FB_GENx(4), FB_GENx(5), FB_GENx(6), FB_GENx(7), FB_GENx(8), }; struct fb_bitmap4x_iter iter; u32 height; iter.data = (const u8 *)image->data; if (reverse.pixel) { fgcolor = fb_reverse_bits_long(fgcolor << (BITS_PER_BYTE - bpp)); bgcolor = fb_reverse_bits_long(bgcolor << (BITS_PER_BYTE - bpp)); } iter.fgxcolor = (fgcolor ^ bgcolor) * mul[bpp-1]; iter.bgcolor = bgcolor * mul[bpp-1]; iter.width = image->width; iter.i = image->width; iter.expand = expand[bpp-1]; iter.bpp = bpp; iter.top = false; height = image->height; while (height--) { fb_bitblit(fb_bitmap4x_image, &iter, bpp * BITS_PER_BYTE/2, dst, reverse); fb_address_forward(dst, bits_per_line); } } /* draw a bitmap image 1 pixel at a time (for >8 bpp) */ static inline void fb_bitmap1x_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int bpp, unsigned int bits_per_line, struct fb_reverse reverse) { struct fb_bitmap_iter iter; u32 height; iter.colors[0] = bgcolor; iter.colors[1] = fgcolor; #ifndef __LITTLE_ENDIAN iter.colors[0] <<= BITS_PER_LONG - bpp; iter.colors[1] <<= BITS_PER_LONG - bpp; #endif iter.data = (const u8 *)image->data; iter.width = image->width; iter.i = 0; height = image->height; while (height--) { fb_bitblit(fb_bitmap_image, &iter, bpp, dst, 
reverse); fb_address_forward(dst, bits_per_line); } } /* one pixel per word, 64/32 bpp blitting */ static inline void fb_bitmap_1ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { unsigned long tab[2]; const u8 *src = (u8 *)image->data; int width = image->width; int offset; u32 height; if (reverse.byte) { tab[0] = swab_long(bgcolor); tab[1] = swab_long(fgcolor); } else { tab[0] = bgcolor; tab[1] = fgcolor; } height = image->height; while (height--) { for (offset = 0; offset + 8 <= width; offset += 8) { unsigned int srcbyte = *src++; fb_write_offset(tab[(srcbyte >> 7) & 1], offset + 0, dst); fb_write_offset(tab[(srcbyte >> 6) & 1], offset + 1, dst); fb_write_offset(tab[(srcbyte >> 5) & 1], offset + 2, dst); fb_write_offset(tab[(srcbyte >> 4) & 1], offset + 3, dst); fb_write_offset(tab[(srcbyte >> 3) & 1], offset + 4, dst); fb_write_offset(tab[(srcbyte >> 2) & 1], offset + 5, dst); fb_write_offset(tab[(srcbyte >> 1) & 1], offset + 6, dst); fb_write_offset(tab[(srcbyte >> 0) & 1], offset + 7, dst); } if (offset < width) { unsigned int srcbyte = *src++; while (offset < width) { fb_write_offset(tab[(srcbyte >> 7) & 1], offset, dst); srcbyte <<= 1; offset++; } } fb_address_move_long(dst, words_per_line); } } static inline unsigned long fb_pack(unsigned long left, unsigned long right, int bits) { #ifdef __LITTLE_ENDIAN return left | right << bits; #else return right | left << bits; #endif } /* aligned 32/16 bpp blitting */ static inline void fb_bitmap_2ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { unsigned long tab[4]; const u8 *src = (u8 *)image->data; int width = image->width / 2; int offset; u32 height; tab[0] = fb_pack(bgcolor, bgcolor, BITS_PER_LONG/2); tab[1] = fb_pack(bgcolor, fgcolor, BITS_PER_LONG/2); tab[2] = fb_pack(fgcolor, bgcolor, BITS_PER_LONG/2); tab[3] = fb_pack(fgcolor, fgcolor, BITS_PER_LONG/2); if (reverse.byte) { tab[0] = swab_long(tab[0]); tab[1] = swab_long(tab[1]); tab[2] = swab_long(tab[2]); tab[3] = swab_long(tab[3]); } height = image->height; while (height--) { for (offset = 0; offset + 4 <= width; offset += 4) { unsigned int srcbyte = *src++; fb_write_offset(tab[(srcbyte >> 6) & 3], offset + 0, dst); fb_write_offset(tab[(srcbyte >> 4) & 3], offset + 1, dst); fb_write_offset(tab[(srcbyte >> 2) & 3], offset + 2, dst); fb_write_offset(tab[(srcbyte >> 0) & 3], offset + 3, dst); } if (offset < width) { unsigned int srcbyte = *src++; while (offset < width) { fb_write_offset(tab[(srcbyte >> 6) & 3], offset, dst); srcbyte <<= 2; offset++; } } fb_address_move_long(dst, words_per_line); } } #define FB_PATP(a, b) (((a)<<((b)*BITS_PER_LONG/4))*((1UL<<BITS_PER_LONG/4)-1UL)) #define FB_PAT4(a) (FB_PATP((a)&1, 0)|FB_PATP(((a)&2)/2, 1)| \ FB_PATP(((a)&4)/4, 2)|FB_PATP(((a)&8)/8, 3)) /* aligned 16/8 bpp blitting */ static inline void fb_bitmap_4ppw(const struct fb_image *image, struct fb_address *dst, unsigned long fgcolor, unsigned long bgcolor, int words_per_line, struct fb_reverse reverse) { static const unsigned long tab16_be[] = { 0, FB_PAT4(1UL), FB_PAT4(2UL), FB_PAT4(3UL), FB_PAT4(4UL), FB_PAT4(5UL), FB_PAT4(6UL), FB_PAT4(7UL), FB_PAT4(8UL), FB_PAT4(9UL), FB_PAT4(10UL), FB_PAT4(11UL), FB_PAT4(12UL), FB_PAT4(13UL), FB_PAT4(14UL), ~0UL }; static const unsigned long tab16_le[] = { 0, FB_PAT4(8UL), FB_PAT4(4UL), FB_PAT4(12UL), FB_PAT4(2UL), FB_PAT4(10UL), FB_PAT4(6UL), 
FB_PAT4(14UL), FB_PAT4(1UL), FB_PAT4(9UL), FB_PAT4(5UL), FB_PAT4(13UL), FB_PAT4(3UL), FB_PAT4(11UL), FB_PAT4(7UL), ~0UL }; const unsigned long *tab; const u8 *src = (u8 *)image->data; int width = image->width / 4; int offset; u32 height; fgcolor = fgcolor | fgcolor << BITS_PER_LONG/4; bgcolor = bgcolor | bgcolor << BITS_PER_LONG/4; fgcolor = fgcolor | fgcolor << BITS_PER_LONG/2; bgcolor = bgcolor | bgcolor << BITS_PER_LONG/2; fgcolor ^= bgcolor; if (BITS_PER_LONG/4 > BITS_PER_BYTE && reverse.byte) { fgcolor = swab_long(fgcolor); bgcolor = swab_long(bgcolor); } #ifdef __LITTLE_ENDIAN tab = reverse.byte ? tab16_be : tab16_le; #else tab = reverse.byte ? tab16_le : tab16_be; #endif height = image->height; while (height--) { for (offset = 0; offset + 2 <= width; offset += 2, src++) { fb_write_offset((fgcolor & tab[*src >> 4]) ^ bgcolor, offset + 0, dst); fb_write_offset((fgcolor & tab[*src & 0xf]) ^ bgcolor, offset + 1, dst); } if (offset < width) fb_write_offset((fgcolor & tab[*src++ >> 4]) ^ bgcolor, offset, dst); fb_address_move_long(dst, words_per_line); } } static inline void fb_bitmap_imageblit(const struct fb_image *image, struct fb_address *dst, unsigned int bits_per_line, const u32 *palette, int bpp, struct fb_reverse reverse) { unsigned long fgcolor, bgcolor; if (palette) { fgcolor = palette[image->fg_color]; bgcolor = palette[image->bg_color]; } else { fgcolor = image->fg_color; bgcolor = image->bg_color; } if (!dst->bits && !(bits_per_line & (BITS_PER_LONG-1))) { if (bpp == BITS_PER_LONG && BITS_PER_LONG == 32) { fb_bitmap_1ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } if (bpp == BITS_PER_LONG/2 && !(image->width & 1)) { fb_bitmap_2ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } if (bpp == BITS_PER_LONG/4 && !(image->width & 3)) { fb_bitmap_4ppw(image, dst, fgcolor, bgcolor, bits_per_line / BITS_PER_LONG, reverse); return; } } if (bpp > 0 && bpp <= BITS_PER_BYTE) fb_bitmap4x_imageblit(image, dst, fgcolor, bgcolor, bpp, bits_per_line, reverse); else if (bpp > BITS_PER_BYTE && bpp <= BITS_PER_LONG) fb_bitmap1x_imageblit(image, dst, fgcolor, bgcolor, bpp, bits_per_line, reverse); } static inline void fb_imageblit(struct fb_info *p, const struct fb_image *image) { int bpp = p->var.bits_per_pixel; unsigned int bits_per_line = BYTES_TO_BITS(p->fix.line_length); struct fb_address dst = fb_address_init(p); struct fb_reverse reverse = fb_reverse_init(p); const u32 *palette = fb_palette(p); fb_address_forward(&dst, image->dy * bits_per_line + image->dx * bpp); if (image->depth == 1) fb_bitmap_imageblit(image, &dst, bits_per_line, palette, bpp, reverse); else fb_color_imageblit(image, &dst, bits_per_line, palette, bpp, reverse); } |
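/*
 * Illustrative usage sketch (not part of this file): a caller describes a
 * 1-bit (depth == 1) glyph with struct fb_image and hands it to the
 * blitter above, which picks an aligned fast path when the destination
 * allows it.  The glyph data, coordinates and the "info" pointer are
 * hypothetical.
 */
static const u8 example_glyph[8] = {
	0x3c, 0x42, 0x81, 0x81, 0x81, 0x81, 0x42, 0x3c,	/* 8x8 circle */
};

static void example_blit_glyph(struct fb_info *info)
{
	struct fb_image image = {
		.dx	  = 16,			/* destination offset in pixels */
		.dy	  = 16,
		.width	  = 8,			/* one byte of bitmap per scanline */
		.height	  = 8,
		.fg_color = 1,			/* indices into the pseudo palette */
		.bg_color = 0,
		.depth	  = 1,			/* bitmap path, not the color-image path */
		.data	  = (const char *)example_glyph,
	};

	fb_imageblit(info, &image);
}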
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * netprio_cgroup.h		Control Group Priority set
 *
 * Authors:	Neil Horman <nhorman@tuxdriver.com>
 */
#ifndef _NETPRIO_CGROUP_H
#define _NETPRIO_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map {
	struct rcu_head rcu;
	u32 priomap_len;
	u32 priomap[];
};

static inline u32 task_netprioidx(struct task_struct *p)
{
	struct cgroup_subsys_state *css;
	u32 idx;

	rcu_read_lock();
	css = task_css(p, net_prio_cgrp_id);
	idx = css->id;
	rcu_read_unlock();
	return idx;
}

static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
	if (in_interrupt())
		return;

	sock_cgroup_set_prioidx(skcd, task_netprioidx(current));
}

#else /* !CONFIG_CGROUP_NET_PRIO */

static inline u32 task_netprioidx(struct task_struct *p)
{
	return 0;
}

static inline void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
}

#endif /* CONFIG_CGROUP_NET_PRIO */
#endif /* _NETPRIO_CGROUP_H */
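/*
 * Minimal usage sketch (assumption, not part of this header): the socket
 * core tags a newly created socket with the net_prio cgroup index of the
 * creating task via sock_update_netprioidx().  The helper is a no-op in
 * interrupt context and when CONFIG_CGROUP_NET_PRIO is disabled.  The
 * function name below is hypothetical.
 */
#include <net/netprio_cgroup.h>
#include <net/sock.h>

static inline void example_tag_socket(struct sock *sk)
{
	sock_update_netprioidx(&sk->sk_cgrp_data);
}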
2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 | // SPDX-License-Identifier: 
GPL-2.0-or-later /* * file.c * * File open, close, extend, truncate * * Copyright (C) 2002, 2004 Oracle. All rights reserved. */ #include <linux/capability.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/sched.h> #include <linux/splice.h> #include <linux/mount.h> #include <linux/writeback.h> #include <linux/falloc.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "aops.h" #include "dir.h" #include "dlmglue.h" #include "extent_map.h" #include "file.h" #include "sysfile.h" #include "inode.h" #include "ioctl.h" #include "journal.h" #include "locks.h" #include "mmap.h" #include "suballoc.h" #include "super.h" #include "xattr.h" #include "acl.h" #include "quota.h" #include "refcounttree.h" #include "ocfs2_trace.h" #include "buffer_head_io.h" static int ocfs2_init_file_private(struct inode *inode, struct file *file) { struct ocfs2_file_private *fp; fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL); if (!fp) return -ENOMEM; fp->fp_file = file; mutex_init(&fp->fp_mutex); ocfs2_file_lock_res_init(&fp->fp_flock, fp); file->private_data = fp; return 0; } static void ocfs2_free_file_private(struct inode *inode, struct file *file) { struct ocfs2_file_private *fp = file->private_data; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (fp) { ocfs2_simple_drop_lockres(osb, &fp->fp_flock); ocfs2_lock_res_free(&fp->fp_flock); kfree(fp); file->private_data = NULL; } } static int ocfs2_file_open(struct inode *inode, struct file *file) { int status; int mode = file->f_flags; struct ocfs2_inode_info *oi = OCFS2_I(inode); trace_ocfs2_file_open(inode, file, file->f_path.dentry, (unsigned long long)oi->ip_blkno, file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name, mode); if (file->f_mode & FMODE_WRITE) { status = dquot_initialize(inode); if (status) goto leave; } spin_lock(&oi->ip_lock); /* Check that the inode hasn't been wiped from disk by another * node. If it hasn't then we're safe as long as we hold the * spin lock until our increment of open count. */ if (oi->ip_flags & OCFS2_INODE_DELETED) { spin_unlock(&oi->ip_lock); status = -ENOENT; goto leave; } if (mode & O_DIRECT) oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT; oi->ip_open_count++; spin_unlock(&oi->ip_lock); status = ocfs2_init_file_private(inode, file); if (status) { /* * We want to set open count back if we're failing the * open. 
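 * (ocfs2_init_file_private() can only fail before it publishes
 * file->private_data, so backing out the open count is all the
 * cleanup this path needs.)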
*/ spin_lock(&oi->ip_lock); oi->ip_open_count--; spin_unlock(&oi->ip_lock); } file->f_mode |= FMODE_NOWAIT; leave: return status; } static int ocfs2_file_release(struct inode *inode, struct file *file) { struct ocfs2_inode_info *oi = OCFS2_I(inode); spin_lock(&oi->ip_lock); if (!--oi->ip_open_count) oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT; trace_ocfs2_file_release(inode, file, file->f_path.dentry, oi->ip_blkno, file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name, oi->ip_open_count); spin_unlock(&oi->ip_lock); ocfs2_free_file_private(inode, file); return 0; } static int ocfs2_dir_open(struct inode *inode, struct file *file) { return ocfs2_init_file_private(inode, file); } static int ocfs2_dir_release(struct inode *inode, struct file *file) { ocfs2_free_file_private(inode, file); return 0; } static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { int err = 0; struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); journal_t *journal = osb->journal->j_journal; int ret; tid_t commit_tid; bool needs_barrier = false; trace_ocfs2_sync_file(inode, file, file->f_path.dentry, oi->ip_blkno, file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name, (unsigned long long)datasync); if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS; err = file_write_and_wait_range(file, start, end); if (err) return err; commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid; if (journal->j_flags & JBD2_BARRIER && !jbd2_trans_will_send_data_barrier(journal, commit_tid)) needs_barrier = true; err = jbd2_complete_transaction(journal, commit_tid); if (needs_barrier) { ret = blkdev_issue_flush(inode->i_sb->s_bdev); if (!err) err = ret; } if (err) mlog_errno(err); return (err < 0) ? -EIO : 0; } int ocfs2_should_update_atime(struct inode *inode, struct vfsmount *vfsmnt) { struct timespec64 now; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return 0; if ((inode->i_flags & S_NOATIME) || ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))) return 0; /* * We can be called with no vfsmnt structure - NFSD will * sometimes do this. * * Note that our action here is different than touch_atime() - * if we can't tell whether this is a noatime mount, then we * don't know whether to trust the value of s_atime_quantum. 
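 * (Returning 0 here simply means the caller skips the atime update;
 * nothing is written for such callers.)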
*/ if (vfsmnt == NULL) return 0; if ((vfsmnt->mnt_flags & MNT_NOATIME) || ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))) return 0; if (vfsmnt->mnt_flags & MNT_RELATIME) { struct timespec64 ctime = inode_get_ctime(inode); struct timespec64 atime = inode_get_atime(inode); struct timespec64 mtime = inode_get_mtime(inode); if ((timespec64_compare(&atime, &mtime) <= 0) || (timespec64_compare(&atime, &ctime) <= 0)) return 1; return 0; } now = current_time(inode); if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum)) return 0; else return 1; } int ocfs2_update_inode_atime(struct inode *inode, struct buffer_head *bh) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_commit; } /* * Don't use ocfs2_mark_inode_dirty() here as we don't always * have i_rwsem to guard against concurrent changes to other * inode fields. */ inode_set_atime_to_ts(inode, current_time(inode)); di->i_atime = cpu_to_le64(inode_get_atime_sec(inode)); di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode)); ocfs2_update_inode_fsync_trans(handle, inode, 0); ocfs2_journal_dirty(handle, bh); out_commit: ocfs2_commit_trans(osb, handle); out: return ret; } int ocfs2_set_inode_size(handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 new_i_size) { int status; i_size_write(inode, new_i_size); inode->i_blocks = ocfs2_inode_sector_count(inode); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); if (status < 0) { mlog_errno(status); goto bail; } bail: return status; } int ocfs2_simple_size_update(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); handle_t *handle = NULL; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_set_inode_size(handle, inode, di_bh, new_i_size); if (ret < 0) mlog_errno(ret); ocfs2_update_inode_fsync_trans(handle, inode, 0); ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_cow_file_pos(struct inode *inode, struct buffer_head *fe_bh, u64 offset) { int status; u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; unsigned int num_clusters = 0; unsigned int ext_flags = 0; /* * If the new offset is aligned to the range of the cluster, there is * no space for ocfs2_zero_range_for_truncate to fill, so no need to * CoW either. */ if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0) return 0; status = ocfs2_get_clusters(inode, cpos, &phys, &num_clusters, &ext_flags); if (status) { mlog_errno(status); goto out; } if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) goto out; return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); out: return status; } static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh, u64 new_i_size) { int status; handle_t *handle; struct ocfs2_dinode *di; u64 cluster_bytes; /* * We need to CoW the cluster contains the offset if it is reflinked * since we will call ocfs2_zero_range_for_truncate later which will * write "0" from offset to the end of the cluster. 
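 * (Zeroing a shared, refcounted cluster in place would also be visible
 * to the other inodes sharing it, hence the CoW first.)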
*/ status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size); if (status) { mlog_errno(status); return status; } /* TODO: This needs to actually orphan the inode in this * transaction. */ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto out; } status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto out_commit; } /* * Do this before setting i_size. */ cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size); status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size, cluster_bytes); if (status) { mlog_errno(status); goto out_commit; } i_size_write(inode, new_i_size); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); di = (struct ocfs2_dinode *) fe_bh->b_data; di->i_size = cpu_to_le64(new_i_size); di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode)); di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode)); ocfs2_update_inode_fsync_trans(handle, inode, 0); ocfs2_journal_dirty(handle, fe_bh); out_commit: ocfs2_commit_trans(osb, handle); out: return status; } int ocfs2_truncate_file(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size) { int status = 0; struct ocfs2_dinode *fe = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); /* We trust di_bh because it comes from ocfs2_inode_lock(), which * already validated it */ fe = (struct ocfs2_dinode *) di_bh->b_data; trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)le64_to_cpu(fe->i_size), (unsigned long long)new_i_size); mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode), "Inode %llu, inode i_size = %lld != di " "i_size = %llu, i_flags = 0x%x\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode), (unsigned long long)le64_to_cpu(fe->i_size), le32_to_cpu(fe->i_flags)); if (new_i_size > le64_to_cpu(fe->i_size)) { trace_ocfs2_truncate_file_error( (unsigned long long)le64_to_cpu(fe->i_size), (unsigned long long)new_i_size); status = -EINVAL; mlog_errno(status); goto bail; } down_write(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_resv_discard(&osb->osb_la_resmap, &OCFS2_I(inode)->ip_la_data_resv); /* * The inode lock forced other nodes to sync and drop their * pages, which (correctly) happens even if we have a truncate * without allocation change - ocfs2 cluster sizes can be much * greater than page size, so we have to truncate them * anyway. */ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1); truncate_inode_pages(inode->i_mapping, new_i_size); status = ocfs2_truncate_inline(inode, di_bh, new_i_size, i_size_read(inode), 1); if (status) mlog_errno(status); goto bail_unlock_sem; } /* alright, we're going to need to do a full blown alloc size * change. Orphan the inode so that recovery can complete the * truncate if necessary. This does the task of marking * i_size. */ status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size); if (status < 0) { mlog_errno(status); goto bail_unlock_sem; } unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1); truncate_inode_pages(inode->i_mapping, new_i_size); status = ocfs2_commit_truncate(osb, inode, di_bh); if (status < 0) { mlog_errno(status); goto bail_unlock_sem; } /* TODO: orphan dir cleanup here. 
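 * (As noted above, recovery is expected to be able to finish the
 * truncate if this node dies before committing it.)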
*/ bail_unlock_sem: up_write(&OCFS2_I(inode)->ip_alloc_sem); bail: if (!status && OCFS2_I(inode)->ip_clusters == 0) status = ocfs2_try_remove_refcount_tree(inode, di_bh); return status; } /* * extend file allocation only here. * we'll update all the disk stuff, and oip->alloc_size * * expect stuff to be locked, a transaction started and enough data / * metadata reservations in the contexts. * * Will return -EAGAIN, and a reason if a restart is needed. * If passed in, *reason will always be set, even in error. */ int ocfs2_add_inode_data(struct ocfs2_super *osb, struct inode *inode, u32 *logical_offset, u32 clusters_to_add, int mark_unwritten, struct buffer_head *fe_bh, handle_t *handle, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret) { struct ocfs2_extent_tree et; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh); return ocfs2_add_clusters_in_btree(handle, &et, logical_offset, clusters_to_add, mark_unwritten, data_ac, meta_ac, reason_ret); } static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start, u32 clusters_to_add, int mark_unwritten) { int status = 0; int restart_func = 0; int credits; u32 prev_clusters; struct buffer_head *bh = NULL; struct ocfs2_dinode *fe = NULL; handle_t *handle = NULL; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; enum ocfs2_alloc_restarted why = RESTART_NONE; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_extent_tree et; int did_quota = 0; /* * Unwritten extent only exists for file systems which * support holes. */ BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb)); status = ocfs2_read_inode_block(inode, &bh); if (status < 0) { mlog_errno(status); goto leave; } fe = (struct ocfs2_dinode *) bh->b_data; restart_all: BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh); status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0, &data_ac, &meta_ac); if (status) { mlog_errno(status); goto leave; } credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { status = PTR_ERR(handle); handle = NULL; mlog_errno(status); goto leave; } restarted_transaction: trace_ocfs2_extend_allocation( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)i_size_read(inode), le32_to_cpu(fe->i_clusters), clusters_to_add, why, restart_func); status = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); if (status) goto leave; did_quota = 1; /* reserve a write to the file entry early on - that we if we * run out of credits in the allocation path, we can still * update i_size. 
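 * (That early reservation is the ocfs2_journal_access_di() call
 * right below.)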
*/ status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (status < 0) { mlog_errno(status); goto leave; } prev_clusters = OCFS2_I(inode)->ip_clusters; status = ocfs2_add_inode_data(osb, inode, &logical_start, clusters_to_add, mark_unwritten, bh, handle, data_ac, meta_ac, &why); if ((status < 0) && (status != -EAGAIN)) { if (status != -ENOSPC) mlog_errno(status); goto leave; } ocfs2_update_inode_fsync_trans(handle, inode, 1); ocfs2_journal_dirty(handle, bh); spin_lock(&OCFS2_I(inode)->ip_lock); clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); spin_unlock(&OCFS2_I(inode)->ip_lock); /* Release unused quota reservation */ dquot_free_space(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); did_quota = 0; if (why != RESTART_NONE && clusters_to_add) { if (why == RESTART_META) { restart_func = 1; status = 0; } else { BUG_ON(why != RESTART_TRANS); status = ocfs2_allocate_extend_trans(handle, 1); if (status < 0) { /* handle still has to be committed at * this point. */ status = -ENOMEM; mlog_errno(status); goto leave; } goto restarted_transaction; } } trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno, le32_to_cpu(fe->i_clusters), (unsigned long long)le64_to_cpu(fe->i_size), OCFS2_I(inode)->ip_clusters, (unsigned long long)i_size_read(inode)); leave: if (status < 0 && did_quota) dquot_free_space(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); if (handle) { ocfs2_commit_trans(osb, handle); handle = NULL; } if (data_ac) { ocfs2_free_alloc_context(data_ac); data_ac = NULL; } if (meta_ac) { ocfs2_free_alloc_context(meta_ac); meta_ac = NULL; } if ((!status) && restart_func) { restart_func = 0; goto restart_all; } brelse(bh); bh = NULL; return status; } /* * While a write will already be ordering the data, a truncate will not. * Thus, we need to explicitly order the zeroed pages. */ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode, struct buffer_head *di_bh, loff_t start_byte, loff_t length) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); handle_t *handle = NULL; int ret = 0; if (!ocfs2_should_order_data(inode)) goto out; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = -ENOMEM; mlog_errno(ret); goto out; } ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); if (ret < 0) { mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) mlog_errno(ret); ocfs2_update_inode_fsync_trans(handle, inode, 1); out: if (ret) { if (!IS_ERR(handle)) ocfs2_commit_trans(osb, handle); handle = ERR_PTR(ret); } return handle; } /* Some parts of this taken from generic_cont_expand, which turned out * to be too fragile to do exactly what we need without us having to * worry about recursive locking in ->write_begin() and ->write_end(). 
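 * ocfs2_write_zero_page() zeroes the byte range [abs_from, abs_to)
 * inside a single page/folio and then pushes i_size (in memory and in
 * the dinode) up to abs_to so writeback will not discard the newly
 * dirtied pages.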
*/ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, u64 abs_to, struct buffer_head *di_bh) { struct address_space *mapping = inode->i_mapping; struct folio *folio; unsigned long index = abs_from >> PAGE_SHIFT; handle_t *handle; int ret = 0; unsigned zero_from, zero_to, block_start, block_end; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; BUG_ON(abs_from >= abs_to); BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT)); BUG_ON(abs_from & (inode->i_blkbits - 1)); handle = ocfs2_zero_start_ordered_transaction(inode, di_bh, abs_from, abs_to - abs_from); if (IS_ERR(handle)) { ret = PTR_ERR(handle); goto out; } folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); if (IS_ERR(folio)) { ret = PTR_ERR(folio); mlog_errno(ret); goto out_commit_trans; } /* Get the offsets within the folio that we want to zero */ zero_from = offset_in_folio(folio, abs_from); zero_to = offset_in_folio(folio, abs_to); if (!zero_to) zero_to = folio_size(folio); trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)abs_from, (unsigned long long)abs_to, index, zero_from, zero_to); /* We know that zero_from is block aligned */ for (block_start = zero_from; block_start < zero_to; block_start = block_end) { block_end = block_start + i_blocksize(inode); /* * block_start is block-aligned. Bump it by one to force * __block_write_begin and block_commit_write to zero the * whole block. */ ret = __block_write_begin(folio, block_start + 1, 0, ocfs2_get_block); if (ret < 0) { mlog_errno(ret); goto out_unlock; } /* must not update i_size! */ block_commit_write(folio, block_start + 1, block_start + 1); } /* * fs-writeback will release the dirty pages without page lock * whose offset are over inode size, the release happens at * block_write_full_folio(). */ i_size_write(inode, abs_to); inode->i_blocks = ocfs2_inode_sector_count(inode); di->i_size = cpu_to_le64((u64)i_size_read(inode)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode)); di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode)); di->i_mtime_nsec = di->i_ctime_nsec; if (handle) { ocfs2_journal_dirty(handle, di_bh); ocfs2_update_inode_fsync_trans(handle, inode, 1); } out_unlock: folio_unlock(folio); folio_put(folio); out_commit_trans: if (handle) ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); out: return ret; } /* * Find the next range to zero. We do this in terms of bytes because * that's what ocfs2_zero_extend() wants, and it is dealing with the * pagecache. We may return multiple extents. * * zero_start and zero_end are ocfs2_zero_extend()s current idea of what * needs to be zeroed. range_start and range_end return the next zeroing * range. A subsequent call should pass the previous range_end as its * zero_start. If range_end is 0, there's nothing to do. * * Unwritten extents are skipped over. Refcounted extents are CoWd. 
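 * ocfs2_zero_extend() below is the caller and shows the intended loop:
 * feed the returned range_end back in as the next zero_start until
 * range_end comes back as 0.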
*/ static int ocfs2_zero_extend_get_range(struct inode *inode, struct buffer_head *di_bh, u64 zero_start, u64 zero_end, u64 *range_start, u64 *range_end) { int rc = 0, needs_cow = 0; u32 p_cpos, zero_clusters = 0; u32 zero_cpos = zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end); unsigned int num_clusters = 0; unsigned int ext_flags = 0; while (zero_cpos < last_cpos) { rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos, &num_clusters, &ext_flags); if (rc) { mlog_errno(rc); goto out; } if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { zero_clusters = num_clusters; if (ext_flags & OCFS2_EXT_REFCOUNTED) needs_cow = 1; break; } zero_cpos += num_clusters; } if (!zero_clusters) { *range_end = 0; goto out; } while ((zero_cpos + zero_clusters) < last_cpos) { rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters, &p_cpos, &num_clusters, &ext_flags); if (rc) { mlog_errno(rc); goto out; } if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)) break; if (ext_flags & OCFS2_EXT_REFCOUNTED) needs_cow = 1; zero_clusters += num_clusters; } if ((zero_cpos + zero_clusters) > last_cpos) zero_clusters = last_cpos - zero_cpos; if (needs_cow) { rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, zero_clusters, UINT_MAX); if (rc) { mlog_errno(rc); goto out; } } *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos); *range_end = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos + zero_clusters); out: return rc; } /* * Zero one range returned from ocfs2_zero_extend_get_range(). The caller * has made sure that the entire range needs zeroing. */ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start, u64 range_end, struct buffer_head *di_bh) { int rc = 0; u64 next_pos; u64 zero_pos = range_start; trace_ocfs2_zero_extend_range( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)range_start, (unsigned long long)range_end); BUG_ON(range_start >= range_end); while (zero_pos < range_end) { next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE; if (next_pos > range_end) next_pos = range_end; rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh); if (rc < 0) { mlog_errno(rc); break; } zero_pos = next_pos; /* * Very large extends have the potential to lock up * the cpu for extended periods of time. */ cond_resched(); } return rc; } int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, loff_t zero_to_size) { int ret = 0; u64 zero_start, range_start = 0, range_end = 0; struct super_block *sb = inode->i_sb; zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode)); trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)zero_start, (unsigned long long)i_size_read(inode)); while (zero_start < zero_to_size) { ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start, zero_to_size, &range_start, &range_end); if (ret) { mlog_errno(ret); break; } if (!range_end) break; /* Trim the ends */ if (range_start < zero_start) range_start = zero_start; if (range_end > zero_to_size) range_end = zero_to_size; ret = ocfs2_zero_extend_range(inode, range_start, range_end, di_bh); if (ret) { mlog_errno(ret); break; } zero_start = range_end; } return ret; } int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size, u64 zero_to) { int ret; u32 clusters_to_add; struct ocfs2_inode_info *oi = OCFS2_I(inode); /* * Only quota files call this without a bh, and they can't be * refcounted. 
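 * The BUG_ON()s below assert exactly that: a NULL di_bh is only legal
 * for non-refcounted system-file (quota) inodes.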
*/ BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode)); BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE)); clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size); if (clusters_to_add < oi->ip_clusters) clusters_to_add = 0; else clusters_to_add -= oi->ip_clusters; if (clusters_to_add) { ret = ocfs2_extend_allocation(inode, oi->ip_clusters, clusters_to_add, 0); if (ret) { mlog_errno(ret); goto out; } } /* * Call this even if we don't add any clusters to the tree. We * still need to zero the area between the old i_size and the * new i_size. */ ret = ocfs2_zero_extend(inode, di_bh, zero_to); if (ret < 0) mlog_errno(ret); out: return ret; } static int ocfs2_extend_file(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size) { int ret = 0; struct ocfs2_inode_info *oi = OCFS2_I(inode); BUG_ON(!di_bh); /* setattr sometimes calls us like this. */ if (new_i_size == 0) goto out; if (i_size_read(inode) == new_i_size) goto out; BUG_ON(new_i_size < i_size_read(inode)); /* * The alloc sem blocks people in read/write from reading our * allocation until we're done changing it. We depend on * i_rwsem to block other extend/truncate calls while we're * here. We even have to hold it for sparse files because there * might be some tail zeroing. */ down_write(&oi->ip_alloc_sem); if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { /* * We can optimize small extends by keeping the inodes * inline data. */ if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) { up_write(&oi->ip_alloc_sem); goto out_update_size; } ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); if (ret) { up_write(&oi->ip_alloc_sem); mlog_errno(ret); goto out; } } if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) ret = ocfs2_zero_extend(inode, di_bh, new_i_size); else ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size, new_i_size); up_write(&oi->ip_alloc_sem); if (ret < 0) { mlog_errno(ret); goto out; } out_update_size: ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); if (ret < 0) mlog_errno(ret); out: return ret; } int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { int status = 0, size_change; int inode_locked = 0; struct inode *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; struct ocfs2_super *osb = OCFS2_SB(sb); struct buffer_head *bh = NULL; handle_t *handle = NULL; struct dquot *transfer_to[MAXQUOTAS] = { }; int qtype; int had_lock; struct ocfs2_lock_holder oh; trace_ocfs2_setattr(inode, dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, dentry->d_name.len, dentry->d_name.name, attr->ia_valid, attr->ia_valid & ATTR_MODE ? attr->ia_mode : 0, attr->ia_valid & ATTR_UID ? from_kuid(&init_user_ns, attr->ia_uid) : 0, attr->ia_valid & ATTR_GID ? 
from_kgid(&init_user_ns, attr->ia_gid) : 0); /* ensuring we don't even attempt to truncate a symlink */ if (S_ISLNK(inode->i_mode)) attr->ia_valid &= ~ATTR_SIZE; #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \ | ATTR_GID | ATTR_UID | ATTR_MODE) if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) return 0; status = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (status) return status; if (is_quota_modification(&nop_mnt_idmap, inode, attr)) { status = dquot_initialize(inode); if (status) return status; } size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; if (size_change) { /* * Here we should wait dio to finish before inode lock * to avoid a deadlock between ocfs2_setattr() and * ocfs2_dio_end_io_write() */ inode_dio_wait(inode); status = ocfs2_rw_lock(inode, 1); if (status < 0) { mlog_errno(status); goto bail; } } had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh); if (had_lock < 0) { status = had_lock; goto bail_unlock_rw; } else if (had_lock) { /* * As far as we know, ocfs2_setattr() could only be the first * VFS entry point in the call chain of recursive cluster * locking issue. * * For instance: * chmod_common() * notify_change() * ocfs2_setattr() * posix_acl_chmod() * ocfs2_iop_get_acl() * * But, we're not 100% sure if it's always true, because the * ordering of the VFS entry points in the call chain is out * of our control. So, we'd better dump the stack here to * catch the other cases of recursive locking. */ mlog(ML_ERROR, "Another case of recursive locking:\n"); dump_stack(); } inode_locked = 1; if (size_change) { status = inode_newsize_ok(inode, attr->ia_size); if (status) goto bail_unlock; if (i_size_read(inode) >= attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, attr->ia_size); if (status) goto bail_unlock; } status = ocfs2_truncate_file(inode, bh, attr->ia_size); } else status = ocfs2_extend_file(inode, bh, attr->ia_size); if (status < 0) { if (status != -ENOSPC) mlog_errno(status); status = -ENOSPC; goto bail_unlock; } } if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { /* * Gather pointers to quota structures so that allocation / * freeing of quota structures happens here and not inside * dquot_transfer() where we have problems with lock ordering */ if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid)); if (IS_ERR(transfer_to[USRQUOTA])) { status = PTR_ERR(transfer_to[USRQUOTA]); transfer_to[USRQUOTA] = NULL; goto bail_unlock; } } if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid) && OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) { transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid)); if (IS_ERR(transfer_to[GRPQUOTA])) { status = PTR_ERR(transfer_to[GRPQUOTA]); transfer_to[GRPQUOTA] = NULL; goto bail_unlock; } } down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS + 2 * ocfs2_quota_trans_credits(sb)); if (IS_ERR(handle)) { status = PTR_ERR(handle); mlog_errno(status); goto bail_unlock_alloc; } status = __dquot_transfer(inode, transfer_to); if (status < 0) goto bail_commit; } else { down_write(&OCFS2_I(inode)->ip_alloc_sem); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { status = PTR_ERR(handle); 
mlog_errno(status); goto bail_unlock_alloc; } } setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); status = ocfs2_mark_inode_dirty(handle, inode, bh); if (status < 0) mlog_errno(status); bail_commit: ocfs2_commit_trans(osb, handle); bail_unlock_alloc: up_write(&OCFS2_I(inode)->ip_alloc_sem); bail_unlock: if (status && inode_locked) { ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); inode_locked = 0; } bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); bail: /* Release quota pointers in case we acquired them */ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++) dqput(transfer_to[qtype]); if (!status && attr->ia_valid & ATTR_MODE) { status = ocfs2_acl_chmod(inode, bh); if (status < 0) mlog_errno(status); } if (inode_locked) ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); brelse(bh); return status; } int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct inode *inode = d_inode(path->dentry); struct super_block *sb = path->dentry->d_sb; struct ocfs2_super *osb = sb->s_fs_info; int err; err = ocfs2_inode_revalidate(path->dentry); if (err) { if (err != -ENOENT) mlog_errno(err); goto bail; } generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); /* * If there is inline data in the inode, the inode will normally not * have data blocks allocated (it may have an external xattr block). * Report at least one sector for such files, so tools like tar, rsync, * others don't incorrectly think the file is completely sparse. */ if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) stat->blocks += (stat->size + 511)>>9; /* We set the blksize from the cluster size for performance */ stat->blksize = osb->s_clustersize; bail: return err; } int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { int ret, had_lock; struct ocfs2_lock_holder oh; if (mask & MAY_NOT_BLOCK) return -ECHILD; had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh); if (had_lock < 0) { ret = had_lock; goto out; } else if (had_lock) { /* See comments in ocfs2_setattr() for details. 
* The call chain of this case could be: * do_sys_open() * may_open() * inode_permission() * ocfs2_permission() * ocfs2_iop_get_acl() */ mlog(ML_ERROR, "Another case of recursive locking:\n"); dump_stack(); } ret = generic_permission(&nop_mnt_idmap, inode, mask); ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); out: return ret; } static int __ocfs2_write_remove_suid(struct inode *inode, struct buffer_head *bh) { int ret; handle_t *handle; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di; trace_ocfs2_write_remove_suid( (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode); handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret < 0) { mlog_errno(ret); goto out_trans; } inode->i_mode &= ~S_ISUID; if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP)) inode->i_mode &= ~S_ISGID; di = (struct ocfs2_dinode *) bh->b_data; di->i_mode = cpu_to_le16(inode->i_mode); ocfs2_update_inode_fsync_trans(handle, inode, 0); ocfs2_journal_dirty(handle, bh); out_trans: ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_write_remove_suid(struct inode *inode) { int ret; struct buffer_head *bh = NULL; ret = ocfs2_read_inode_block(inode, &bh); if (ret < 0) { mlog_errno(ret); goto out; } ret = __ocfs2_write_remove_suid(inode, bh); out: brelse(bh); return ret; } /* * Allocate enough extents to cover the region starting at byte offset * start for len bytes. Existing extents are skipped, any extents * added are marked as "unwritten". */ static int ocfs2_allocate_unwritten_extents(struct inode *inode, u64 start, u64 len) { int ret; u32 cpos, phys_cpos, clusters, alloc_size; u64 end = start + len; struct buffer_head *di_bh = NULL; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } /* * Nothing to do if the requested reservation range * fits within the inode. */ if (ocfs2_size_fits_inline_data(di_bh, end)) goto out; ret = ocfs2_convert_inline_data_to_extents(inode, di_bh); if (ret) { mlog_errno(ret); goto out; } } /* * We consider both start and len to be inclusive. */ cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len); clusters -= cpos; while (clusters) { ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size, NULL); if (ret) { mlog_errno(ret); goto out; } /* * Hole or existing extent len can be arbitrary, so * cap it to our own allocation request. */ if (alloc_size > clusters) alloc_size = clusters; if (phys_cpos) { /* * We already have an allocation at this * region so we can safely skip it. */ goto next; } ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1); if (ret) { if (ret != -ENOSPC) mlog_errno(ret); goto out; } next: cpos += alloc_size; clusters -= alloc_size; } ret = 0; out: brelse(di_bh); return ret; } /* * Truncate a byte range, avoiding pages within partial clusters. This * preserves those pages for the zeroing code to write to. 
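 * ocfs2_truncate_cluster_pages(): start is rounded up and end rounded
 * down to cluster boundaries, so only pages belonging to whole
 * truncated clusters are unmapped and dropped here.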
*/ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start, u64 byte_len) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); loff_t start, end; struct address_space *mapping = inode->i_mapping; start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start); end = byte_start + byte_len; end = end & ~(osb->s_clustersize - 1); if (start < end) { unmap_mapping_range(mapping, start, end - start, 0); truncate_inode_pages_range(mapping, start, end - 1); } } /* * zero out partial blocks of one cluster. * * start: file offset where zero starts, will be made upper block aligned. * len: it will be trimmed to the end of current cluster if "start + len" * is bigger than it. */ static int ocfs2_zeroout_partial_cluster(struct inode *inode, u64 start, u64 len) { int ret; u64 start_block, end_block, nr_blocks; u64 p_block, offset; u32 cluster, p_cluster, nr_clusters; struct super_block *sb = inode->i_sb; u64 end = ocfs2_align_bytes_to_clusters(sb, start); if (start + len < end) end = start + len; start_block = ocfs2_blocks_for_bytes(sb, start); end_block = ocfs2_blocks_for_bytes(sb, end); nr_blocks = end_block - start_block; if (!nr_blocks) return 0; cluster = ocfs2_bytes_to_clusters(sb, start); ret = ocfs2_get_clusters(inode, cluster, &p_cluster, &nr_clusters, NULL); if (ret) return ret; if (!p_cluster) return 0; offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); } static int ocfs2_zero_partial_clusters(struct inode *inode, u64 start, u64 len) { int ret = 0; u64 tmpend = 0; u64 end = start + len; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); unsigned int csize = osb->s_clustersize; handle_t *handle; loff_t isize = i_size_read(inode); /* * The "start" and "end" values are NOT necessarily part of * the range whose allocation is being deleted. Rather, this * is what the user passed in with the request. We must zero * partial clusters here. There's no need to worry about * physical allocation - the zeroing code knows to skip holes. */ trace_ocfs2_zero_partial_clusters( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)start, (unsigned long long)end); /* * If both edges are on a cluster boundary then there's no * zeroing required as the region is part of the allocation to * be truncated. */ if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0) goto out; /* No page cache for EOF blocks, issue zero out to disk. */ if (end > isize) { /* * zeroout eof blocks in last cluster starting from * "isize" even "start" > "isize" because it is * complicated to zeroout just at "start" as "start" * may be not aligned with block size, buffer write * would be required to do that, but out of eof buffer * write is not supported. */ ret = ocfs2_zeroout_partial_cluster(inode, isize, end - isize); if (ret) { mlog_errno(ret); goto out; } if (start >= isize) goto out; end = isize; } handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } /* * If start is on a cluster boundary and end is somewhere in another * cluster, we have not COWed the cluster starting at start, unless * end is also within the same cluster. So, in this case, we skip this * first call to ocfs2_zero_range_for_truncate() truncate and move on * to the next one. */ if ((start & (csize - 1)) != 0) { /* * We want to get the byte offset of the end of the 1st * cluster. 
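 * (e.g. with a 1MB cluster size and start = 0x180000 this gives
 * tmpend = 0x200000, capped to "end" just below if that is smaller.)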
*/ tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1)); if (tmpend > end) tmpend = end; trace_ocfs2_zero_partial_clusters_range1( (unsigned long long)start, (unsigned long long)tmpend); ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend); if (ret) mlog_errno(ret); } if (tmpend < end) { /* * This may make start and end equal, but the zeroing * code will skip any work in that case so there's no * need to catch it up here. */ start = end & ~(osb->s_clustersize - 1); trace_ocfs2_zero_partial_clusters_range2( (unsigned long long)start, (unsigned long long)end); ret = ocfs2_zero_range_for_truncate(inode, handle, start, end); if (ret) mlog_errno(ret); } ocfs2_update_inode_fsync_trans(handle, inode, 1); ocfs2_commit_trans(osb, handle); out: return ret; } static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos) { int i; struct ocfs2_extent_rec *rec = NULL; for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) { rec = &el->l_recs[i]; if (le32_to_cpu(rec->e_cpos) < pos) break; } return i; } /* * Helper to calculate the punching pos and length in one run, we handle the * following three cases in order: * * - remove the entire record * - remove a partial record * - no record needs to be removed (hole-punching completed) */ static void ocfs2_calc_trunc_pos(struct inode *inode, struct ocfs2_extent_list *el, struct ocfs2_extent_rec *rec, u32 trunc_start, u32 *trunc_cpos, u32 *trunc_len, u32 *trunc_end, u64 *blkno, int *done) { int ret = 0; u32 coff, range; range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec); if (le32_to_cpu(rec->e_cpos) >= trunc_start) { /* * remove an entire extent record. */ *trunc_cpos = le32_to_cpu(rec->e_cpos); /* * Skip holes if any. */ if (range < *trunc_end) *trunc_end = range; *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos); *blkno = le64_to_cpu(rec->e_blkno); *trunc_end = le32_to_cpu(rec->e_cpos); } else if (range > trunc_start) { /* * remove a partial extent record, which means we're * removing the last extent record. */ *trunc_cpos = trunc_start; /* * skip hole if any. */ if (range < *trunc_end) *trunc_end = range; *trunc_len = *trunc_end - trunc_start; coff = trunc_start - le32_to_cpu(rec->e_cpos); *blkno = le64_to_cpu(rec->e_blkno) + ocfs2_clusters_to_blocks(inode->i_sb, coff); *trunc_end = trunc_start; } else { /* * It may have two following possibilities: * * - last record has been removed * - trunc_start was within a hole * * both two cases mean the completion of hole punching. 
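 * (*done is what terminates the caller's while loop in
 * ocfs2_remove_inode_range().)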
*/ ret = 1; } *done = ret; } int ocfs2_remove_inode_range(struct inode *inode, struct buffer_head *di_bh, u64 byte_start, u64 byte_len) { int ret = 0, flags = 0, done = 0, i; u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos; u32 cluster_in_el; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_cached_dealloc_ctxt dealloc; struct address_space *mapping = inode->i_mapping; struct ocfs2_extent_tree et; struct ocfs2_path *path = NULL; struct ocfs2_extent_list *el = NULL; struct ocfs2_extent_rec *rec = NULL; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc); ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); ocfs2_init_dealloc_ctxt(&dealloc); trace_ocfs2_remove_inode_range( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)byte_start, (unsigned long long)byte_len); if (byte_len == 0) return 0; if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { int id_count = ocfs2_max_inline_data_with_xattr(inode->i_sb, di); if (byte_start > id_count || byte_start + byte_len > id_count) { ret = -EINVAL; mlog_errno(ret); goto out; } ret = ocfs2_truncate_inline(inode, di_bh, byte_start, byte_start + byte_len, 0); if (ret) { mlog_errno(ret); goto out; } /* * There's no need to get fancy with the page cache * truncate of an inline-data inode. We're talking * about less than a page here, which will be cached * in the dinode buffer anyway. */ unmap_mapping_range(mapping, 0, 0, 0); truncate_inode_pages(mapping, 0); goto out; } /* * For reflinks, we may need to CoW 2 clusters which might be * partially zero'd later, if hole's start and end offset were * within one cluster(means is not exactly aligned to clustersize). */ if (ocfs2_is_refcount_inode(inode)) { ret = ocfs2_cow_file_pos(inode, di_bh, byte_start); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len); if (ret) { mlog_errno(ret); goto out; } } trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start); trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits; cluster_in_el = trunc_end; ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len); if (ret) { mlog_errno(ret); goto out; } path = ocfs2_new_path_from_et(&et); if (!path) { ret = -ENOMEM; mlog_errno(ret); goto out; } while (trunc_end > trunc_start) { ret = ocfs2_find_path(INODE_CACHE(inode), path, cluster_in_el); if (ret) { mlog_errno(ret); goto out; } el = path_leaf_el(path); i = ocfs2_find_rec(el, trunc_end); /* * Need to go to previous extent block. */ if (i < 0) { if (path->p_tree_depth == 0) break; ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cluster_in_el); if (ret) { mlog_errno(ret); goto out; } /* * We've reached the leftmost extent block, * it's safe to leave. */ if (cluster_in_el == 0) break; /* * The 'pos' searched for previous extent block is * always one cluster less than actual trunc_end. 
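 * (Hence the "+ 1" when trunc_end is reset just below.)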
*/ trunc_end = cluster_in_el + 1; ocfs2_reinit_path(path, 1); continue; } else rec = &el->l_recs[i]; ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos, &trunc_len, &trunc_end, &blkno, &done); if (done) break; flags = rec->e_flags; phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno); ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos, phys_cpos, trunc_len, flags, &dealloc, refcount_loc, false); if (ret < 0) { mlog_errno(ret); goto out; } cluster_in_el = trunc_end; ocfs2_reinit_path(path, 1); } ocfs2_truncate_cluster_pages(inode, byte_start, byte_len); out: ocfs2_free_path(path); ocfs2_schedule_truncate_log_flush(osb, 1); ocfs2_run_deallocs(osb, &dealloc); return ret; } /* * Parts of this function taken from xfs_change_file_space() */ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, loff_t f_pos, unsigned int cmd, struct ocfs2_space_resv *sr, int change_size) { int ret; s64 llen; loff_t size, orig_isize; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *di_bh = NULL; handle_t *handle; unsigned long long max_off = inode->i_sb->s_maxbytes; if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS; inode_lock(inode); /* Wait all existing dio workers, newcomers will block on i_rwsem */ inode_dio_wait(inode); /* * This prevents concurrent writes on other nodes */ ret = ocfs2_rw_lock(inode, 1); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out_rw_unlock; } if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) { ret = -EPERM; goto out_inode_unlock; } switch (sr->l_whence) { case 0: /*SEEK_SET*/ break; case 1: /*SEEK_CUR*/ sr->l_start += f_pos; break; case 2: /*SEEK_END*/ sr->l_start += i_size_read(inode); break; default: ret = -EINVAL; goto out_inode_unlock; } sr->l_whence = 0; llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len; if (sr->l_start < 0 || sr->l_start > max_off || (sr->l_start + llen) < 0 || (sr->l_start + llen) > max_off) { ret = -EINVAL; goto out_inode_unlock; } size = sr->l_start + sr->l_len; if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 || cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) { if (sr->l_len <= 0) { ret = -EINVAL; goto out_inode_unlock; } } if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) { ret = __ocfs2_write_remove_suid(inode, di_bh); if (ret) { mlog_errno(ret); goto out_inode_unlock; } } down_write(&OCFS2_I(inode)->ip_alloc_sem); switch (cmd) { case OCFS2_IOC_RESVSP: case OCFS2_IOC_RESVSP64: /* * This takes unsigned offsets, but the signed ones we * pass have been checked against overflow above. */ ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start, sr->l_len); break; case OCFS2_IOC_UNRESVSP: case OCFS2_IOC_UNRESVSP64: ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start, sr->l_len); break; default: ret = -EINVAL; } orig_isize = i_size_read(inode); /* zeroout eof blocks in the cluster. 
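 * (Only done when the operation grows i_size, i.e. change_size is set
 * and orig_isize < size; see the condition just below.)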
*/ if (!ret && change_size && orig_isize < size) { ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, size - orig_isize); if (!ret) i_size_write(inode, size); } up_write(&OCFS2_I(inode)->ip_alloc_sem); if (ret) { mlog_errno(ret); goto out_inode_unlock; } /* * We update c/mtime for these changes */ handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out_inode_unlock; } inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); if (ret < 0) mlog_errno(ret); if (file && (file->f_flags & O_SYNC)) handle->h_sync = 1; ocfs2_commit_trans(osb, handle); out_inode_unlock: brelse(di_bh); ocfs2_inode_unlock(inode, 1); out_rw_unlock: ocfs2_rw_unlock(inode, 1); out: inode_unlock(inode); return ret; } int ocfs2_change_file_space(struct file *file, unsigned int cmd, struct ocfs2_space_resv *sr) { struct inode *inode = file_inode(file); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int ret; if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) && !ocfs2_writes_unwritten_extents(osb)) return -ENOTTY; else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) && !ocfs2_sparse_alloc(osb)) return -ENOTTY; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; ret = mnt_want_write_file(file); if (ret) return ret; ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); mnt_drop_write_file(file); return ret; } static long ocfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_space_resv sr; int change_size = 1; int cmd = OCFS2_IOC_RESVSP64; int ret = 0; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (!ocfs2_writes_unwritten_extents(osb)) return -EOPNOTSUPP; if (mode & FALLOC_FL_KEEP_SIZE) { change_size = 0; } else { ret = inode_newsize_ok(inode, offset + len); if (ret) return ret; } if (mode & FALLOC_FL_PUNCH_HOLE) cmd = OCFS2_IOC_UNRESVSP64; sr.l_whence = 0; sr.l_start = (s64)offset; sr.l_len = (s64)len; return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr, change_size); } int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos, size_t count) { int ret = 0; unsigned int extent_flags; u32 cpos, clusters, extent_len, phys_cpos; struct super_block *sb = inode->i_sb; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) || !ocfs2_is_refcount_inode(inode) || OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos; while (clusters) { ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len, &extent_flags); if (ret < 0) { mlog_errno(ret); goto out; } if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) { ret = 1; break; } if (extent_len > clusters) extent_len = clusters; clusters -= extent_len; cpos += extent_len; } out: return ret; } static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos) { int blockmask = inode->i_sb->s_blocksize - 1; loff_t final_size = pos + count; if ((pos & blockmask) || (final_size & blockmask)) return 1; return 0; } static int ocfs2_inode_lock_for_extent_tree(struct inode *inode, struct buffer_head **di_bh, int meta_level, int write_sem, int wait) { int ret = 0; if (wait) ret = ocfs2_inode_lock(inode, di_bh, meta_level); else ret = ocfs2_try_inode_lock(inode, di_bh, 
meta_level); if (ret < 0) goto out; if (wait) { if (write_sem) down_write(&OCFS2_I(inode)->ip_alloc_sem); else down_read(&OCFS2_I(inode)->ip_alloc_sem); } else { if (write_sem) ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem); else ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem); if (!ret) { ret = -EAGAIN; goto out_unlock; } } return ret; out_unlock: brelse(*di_bh); *di_bh = NULL; ocfs2_inode_unlock(inode, meta_level); out: return ret; } static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode, struct buffer_head **di_bh, int meta_level, int write_sem) { if (write_sem) up_write(&OCFS2_I(inode)->ip_alloc_sem); else up_read(&OCFS2_I(inode)->ip_alloc_sem); brelse(*di_bh); *di_bh = NULL; if (meta_level >= 0) ocfs2_inode_unlock(inode, meta_level); } static int ocfs2_prepare_inode_for_write(struct file *file, loff_t pos, size_t count, int wait) { int ret = 0, meta_level = 0, overwrite_io = 0; int write_sem = 0; struct dentry *dentry = file->f_path.dentry; struct inode *inode = d_inode(dentry); struct buffer_head *di_bh = NULL; u32 cpos; u32 clusters; /* * We start with a read level meta lock and only jump to an ex * if we need to make modifications here. */ for(;;) { ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh, meta_level, write_sem, wait); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out; } /* * Check if IO will overwrite allocated blocks in case * IOCB_NOWAIT flag is set. */ if (!wait && !overwrite_io) { overwrite_io = 1; ret = ocfs2_overwrite_io(inode, di_bh, pos, count); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out_unlock; } } /* Clear suid / sgid if necessary. We do this here * instead of later in the write path because * remove_suid() calls ->setattr without any hint that * we may have already done our cluster locking. Since * ocfs2_setattr() *must* take cluster locks to * proceed, this will lead us to recursively lock the * inode. There's also the dinode i_size state which * can be lost via setattr during extending writes (we * set inode->i_size at the end of a write. 
*/ if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) { if (meta_level == 0) { ocfs2_inode_unlock_for_extent_tree(inode, &di_bh, meta_level, write_sem); meta_level = 1; continue; } ret = ocfs2_write_remove_suid(inode); if (ret < 0) { mlog_errno(ret); goto out_unlock; } } ret = ocfs2_check_range_for_refcount(inode, pos, count); if (ret == 1) { ocfs2_inode_unlock_for_extent_tree(inode, &di_bh, meta_level, write_sem); meta_level = 1; write_sem = 1; ret = ocfs2_inode_lock_for_extent_tree(inode, &di_bh, meta_level, write_sem, wait); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out; } cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits; clusters = ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos; ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); } if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out_unlock; } break; } out_unlock: trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno, pos, count, wait); ocfs2_inode_unlock_for_extent_tree(inode, &di_bh, meta_level, write_sem); out: return ret; } static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { int rw_level; ssize_t written = 0; ssize_t ret; size_t count = iov_iter_count(from); struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); void *saved_ki_complete = NULL; int append_write = ((iocb->ki_pos + count) >= i_size_read(inode) ? 1 : 0); int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0; int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0; trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name, (unsigned int)from->nr_segs); /* GRRRRR */ if (!direct_io && nowait) return -EOPNOTSUPP; if (count == 0) return 0; if (nowait) { if (!inode_trylock(inode)) return -EAGAIN; } else inode_lock(inode); ocfs2_iocb_init_rw_locked(iocb); /* * Concurrent O_DIRECT writes are allowed with * mount_option "coherency=buffered". * For append write, we must take rw EX. */ rw_level = (!direct_io || full_coherency || append_write); if (nowait) ret = ocfs2_try_rw_lock(inode, rw_level); else ret = ocfs2_rw_lock(inode, rw_level); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out_mutex; } /* * O_DIRECT writes with "coherency=full" need to take EX cluster * inode_lock to guarantee coherency. */ if (direct_io && full_coherency) { /* * We need to take and drop the inode lock to force * other nodes to drop their caches. Buffered I/O * already does this in write_begin(). */ if (nowait) ret = ocfs2_try_inode_lock(inode, NULL, 1); else ret = ocfs2_inode_lock(inode, NULL, 1); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out; } ocfs2_inode_unlock(inode, 1); } ret = generic_write_checks(iocb, from); if (ret <= 0) { if (ret) mlog_errno(ret); goto out; } count = ret; ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto out; } if (direct_io && !is_sync_kiocb(iocb) && ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) { /* * Make it a sync io if it's an unaligned aio. 
*/ saved_ki_complete = xchg(&iocb->ki_complete, NULL); } /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_rw_locked(iocb, rw_level); written = __generic_file_write_iter(iocb, from); /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(written == -EIOCBQUEUED && !direct_io); /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that * it can unlock our rw lock. * Unfortunately there are error cases which call end_io and others * that don't. so we don't have to unlock the rw_lock if either an * async dio is going to do it in the future or an end_io after an * error has already done it. */ if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) { rw_level = -1; } if (unlikely(written <= 0)) goto out; if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode)) { ret = filemap_fdatawrite_range(file->f_mapping, iocb->ki_pos - written, iocb->ki_pos - 1); if (ret < 0) written = ret; if (!ret) { ret = jbd2_journal_force_commit(osb->journal->j_journal); if (ret < 0) written = ret; } if (!ret) ret = filemap_fdatawait_range(file->f_mapping, iocb->ki_pos - written, iocb->ki_pos - 1); } out: if (saved_ki_complete) xchg(&iocb->ki_complete, saved_ki_complete); if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); out_mutex: inode_unlock(inode); if (written) ret = written; return ret; } static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { int ret = 0, rw_level = -1, lock_level = 0; struct file *filp = iocb->ki_filp; struct inode *inode = file_inode(filp); int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0; int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0; trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, filp->f_path.dentry->d_name.len, filp->f_path.dentry->d_name.name, to->nr_segs); /* GRRRRR */ if (!inode) { ret = -EINVAL; mlog_errno(ret); goto bail; } if (!direct_io && nowait) return -EOPNOTSUPP; ocfs2_iocb_init_rw_locked(iocb); /* * buffered reads protect themselves in ->read_folio(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. */ if (direct_io) { if (nowait) ret = ocfs2_try_rw_lock(inode, 0); else ret = ocfs2_rw_lock(inode, 0); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto bail; } rw_level = 0; /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_rw_locked(iocb, rw_level); } /* * We're fine letting folks race truncates and extending * writes with read across the cluster, just like they can * locally. Hence no rw_lock during read. * * Take and drop the meta data lock to update inode fields * like i_size. This allows the checks down below * copy_splice_read() a chance of actually working. 
*/ ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level, !nowait); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto bail; } ocfs2_inode_unlock(inode, lock_level); ret = generic_file_read_iter(iocb, to); trace_generic_file_read_iter_ret(ret); /* buffered aio wouldn't have proper lock coverage today */ BUG_ON(ret == -EIOCBQUEUED && !direct_io); /* see ocfs2_file_write_iter */ if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) { rw_level = -1; } bail: if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); return ret; } static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); ssize_t ret = 0; int lock_level = 0; trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry, (unsigned long long)OCFS2_I(inode)->ip_blkno, in->f_path.dentry->d_name.len, in->f_path.dentry->d_name.name, flags); /* * We're fine letting folks race truncates and extending writes with * read across the cluster, just like they can locally. Hence no * rw_lock during read. * * Take and drop the meta data lock to update inode fields like i_size. * This allows the checks down below filemap_splice_read() a chance of * actually working. */ ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1); if (ret < 0) { if (ret != -EAGAIN) mlog_errno(ret); goto bail; } ocfs2_inode_unlock(inode, lock_level); ret = filemap_splice_read(in, ppos, pipe, len, flags); trace_filemap_splice_read_ret(ret); bail: return ret; } /* Refer generic_file_llseek_unlocked() */ static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; int ret = 0; inode_lock(inode); switch (whence) { case SEEK_SET: break; case SEEK_END: /* SEEK_END requires the OCFS2 inode lock for the file * because it references the file's size. */ ret = ocfs2_inode_lock(inode, NULL, 0); if (ret < 0) { mlog_errno(ret); goto out; } offset += i_size_read(inode); ocfs2_inode_unlock(inode, 0); break; case SEEK_CUR: if (offset == 0) { offset = file->f_pos; goto out; } offset += file->f_pos; break; case SEEK_DATA: case SEEK_HOLE: ret = ocfs2_seek_data_hole_offset(file, &offset, whence); if (ret) goto out; break; default: ret = -EINVAL; goto out; } offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); out: inode_unlock(inode); if (ret) return ret; return offset; } static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb); struct buffer_head *in_bh = NULL, *out_bh = NULL; bool same_inode = (inode_in == inode_out); loff_t remapped = 0; ssize_t ret; if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) return -EINVAL; if (!ocfs2_refcount_tree(osb)) return -EOPNOTSUPP; if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS; /* Lock both files against IO */ ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh); if (ret) return ret; /* Check file eligibility and prepare for block sharing. 
*/ ret = -EINVAL; if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) || (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE)) goto out_unlock; ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, &len, remap_flags); if (ret < 0 || len == 0) goto out_unlock; /* Lock out changes to the allocation maps and remap. */ down_write(&OCFS2_I(inode_in)->ip_alloc_sem); if (!same_inode) down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem, SINGLE_DEPTH_NESTING); /* Zap any page cache for the destination file's range. */ truncate_inode_pages_range(&inode_out->i_data, round_down(pos_out, PAGE_SIZE), round_up(pos_out + len, PAGE_SIZE) - 1); remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, inode_out, out_bh, pos_out, len); up_write(&OCFS2_I(inode_in)->ip_alloc_sem); if (!same_inode) up_write(&OCFS2_I(inode_out)->ip_alloc_sem); if (remapped < 0) { ret = remapped; mlog_errno(ret); goto out_unlock; } /* * Empty the extent map so that we may get the right extent * record from the disk. */ ocfs2_extent_map_trunc(inode_in, 0); ocfs2_extent_map_trunc(inode_out, 0); ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len); if (ret) { mlog_errno(ret); goto out_unlock; } out_unlock: ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh); return remapped > 0 ? remapped : ret; } static loff_t ocfs2_dir_llseek(struct file *file, loff_t offset, int whence) { struct ocfs2_file_private *fp = file->private_data; return generic_llseek_cookie(file, offset, whence, &fp->cookie); } const struct inode_operations ocfs2_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, .permission = ocfs2_permission, .listxattr = ocfs2_listxattr, .fiemap = ocfs2_fiemap, .get_inode_acl = ocfs2_iop_get_acl, .set_acl = ocfs2_iop_set_acl, .fileattr_get = ocfs2_fileattr_get, .fileattr_set = ocfs2_fileattr_set, }; const struct inode_operations ocfs2_special_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, .listxattr = ocfs2_listxattr, .permission = ocfs2_permission, .get_inode_acl = ocfs2_iop_get_acl, .set_acl = ocfs2_iop_set_acl, }; /* * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! */ const struct file_operations ocfs2_fops = { .llseek = ocfs2_file_llseek, .mmap_prepare = ocfs2_mmap_prepare, .fsync = ocfs2_sync_file, .release = ocfs2_file_release, .open = ocfs2_file_open, .read_iter = ocfs2_file_read_iter, .write_iter = ocfs2_file_write_iter, .unlocked_ioctl = ocfs2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ocfs2_compat_ioctl, #endif .lock = ocfs2_lock, .flock = ocfs2_flock, .splice_read = ocfs2_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ocfs2_fallocate, .remap_file_range = ocfs2_remap_file_range, .fop_flags = FOP_ASYNC_LOCK, }; WRAP_DIR_ITER(ocfs2_readdir) // FIXME! const struct file_operations ocfs2_dops = { .llseek = ocfs2_dir_llseek, .read = generic_read_dir, .iterate_shared = shared_ocfs2_readdir, .fsync = ocfs2_sync_file, .release = ocfs2_dir_release, .open = ocfs2_dir_open, .unlocked_ioctl = ocfs2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ocfs2_compat_ioctl, #endif .lock = ocfs2_lock, .flock = ocfs2_flock, .fop_flags = FOP_ASYNC_LOCK, }; /* * POSIX-lockless variants of our file_operations. * * These will be used if the underlying cluster stack does not support * posix file locking, if the user passes the "localflocks" mount * option, or if we have a local-only fs. 
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.mmap_prepare	= ocfs2_mmap_prepare,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= ocfs2_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= shared_ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};
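/*
 * Hedged usage sketch (not part of fs/ocfs2/file.c): how the fallocate(2)
 * flags handled above map onto ocfs2's space-reservation requests. A
 * userspace punch-hole call with FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE
 * reaches ocfs2_fallocate(), which selects OCFS2_IOC_UNRESVSP64 and, because
 * KEEP_SIZE is set, leaves i_size alone (change_size == 0). The mount point,
 * file name and offsets below are made-up example values.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/ocfs2/data.img", O_RDWR);	/* hypothetical file */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Deallocate 1 MiB starting at offset 4 MiB; file size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) < 0)
		perror("fallocate");

	close(fd);
	return 0;
}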
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
/* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized * * You should instead use INIT_LIST_HEAD() for normal initialization and * cleanup tasks, when readers have no access to the list being initialized. * However, if the list being initialized is visible to readers, you * need to keep the compiler from being too mischievous. */ static inline void INIT_LIST_HEAD_RCU(struct list_head *list) { WRITE_ONCE(list->next, list); WRITE_ONCE(list->prev, list); } /* * return the ->next pointer of a list_head in an rcu safe * way, we must not access it directly */ #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) /* * Return the ->prev pointer of a list_head in an rcu safe way. Don't * access it directly. * * Any list traversed with list_bidir_prev_rcu() must never use * list_del_rcu(). Doing so will poison the ->prev pointer that * list_bidir_prev_rcu() relies on, which will result in segfaults. * To prevent these segfaults, use list_bidir_del_rcu() instead * of list_del_rcu(). */ #define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev))) /** * list_tail_rcu - returns the prev pointer of the head of the list * @head: the head of the list * * Note: This should only be used with the list header, and even then * only if list_del() and similar primitives are not also used on the * list header. */ #define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev))) /* * Check during list traversal that we are within an RCU reader */ #define check_arg_count_one(dummy) #ifdef CONFIG_PROVE_RCU_LIST #define __list_check_rcu(dummy, cond, extra...) \ ({ \ check_arg_count_one(extra); \ RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \ "RCU-list traversed in non-reader section!"); \ }) #define __list_check_srcu(cond) \ ({ \ RCU_LOCKDEP_WARN(!(cond), \ "RCU-list traversed without holding the required lock!");\ }) #else #define __list_check_rcu(dummy, cond, extra...) \ ({ check_arg_count_one(extra); }) #define __list_check_srcu(cond) ({ }) #endif /* * Insert a new entry between two known consecutive entries. * * This is only for internal list manipulation where we know * the prev/next entries already! */ static inline void __list_add_rcu(struct list_head *new, struct list_head *prev, struct list_head *next) { if (!__list_add_valid(new, prev, next)) return; new->next = next; new->prev = prev; rcu_assign_pointer(list_next_rcu(prev), new); next->prev = new; } /** * list_add_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it after * * Insert a new entry after the specified head. * This is good for implementing stacks. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head, head->next); } /** * list_add_tail_rcu - add a new entry to rcu-protected list * @new: new entry to be added * @head: list head to add it before * * Insert a new entry before the specified head. * This is useful for implementing queues. 
* * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_add_tail_rcu() * or list_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). */ static inline void list_add_tail_rcu(struct list_head *new, struct list_head *head) { __list_add_rcu(new, head->prev, head); } /** * list_del_rcu - deletes entry from list without re-initialization * @entry: the element to delete from the list. * * Note: list_empty() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as list_del_rcu() * or list_add_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). * * Note that the caller is not permitted to immediately free * the newly deleted entry. Instead, either synchronize_rcu() * or call_rcu() must be used to defer freeing until an RCU * grace period has elapsed. */ static inline void list_del_rcu(struct list_head *entry) { __list_del_entry(entry); entry->prev = LIST_POISON2; } /** * list_bidir_del_rcu - deletes entry from list without re-initialization * @entry: the element to delete from the list. * * In contrast to list_del_rcu() doesn't poison the prev pointer thus * allowing backwards traversal via list_bidir_prev_rcu(). * * Note: list_empty() on entry does not return true after this because * the entry is in a special undefined state that permits RCU-based * lockfree reverse traversal. In particular this means that we can not * poison the forward and backwards pointers that may still be used for * walking the list. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another list-mutation * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on * this same list. However, it is perfectly legal to run concurrently * with the _rcu list-traversal primitives, such as * list_for_each_entry_rcu(). * * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on * the same list. * * Note that the caller is not permitted to immediately free * the newly deleted entry. Instead, either synchronize_rcu() * or call_rcu() must be used to defer freeing until an RCU * grace period has elapsed. */ static inline void list_bidir_del_rcu(struct list_head *entry) { __list_del_entry(entry); } /** * hlist_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on the node return true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. 
* * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_add_head_rcu() or * hlist_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_for_each_entry_rcu(). */ static inline void hlist_del_init_rcu(struct hlist_node *n) { if (!hlist_unhashed(n)) { __hlist_del(n); WRITE_ONCE(n->pprev, NULL); } } /** * list_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically from * the perspective of concurrent readers. It is the caller's responsibility * to synchronize with concurrent updaters, if any. * * Note: @old should not be empty. */ static inline void list_replace_rcu(struct list_head *old, struct list_head *new) { new->next = old->next; new->prev = old->prev; rcu_assign_pointer(list_next_rcu(new->prev), new); new->next->prev = new; old->prev = LIST_POISON2; } /** * __list_splice_init_rcu - join an RCU-protected list into an existing list. * @list: the RCU-protected list to splice * @prev: points to the last element of the existing list * @next: points to the first element of the existing list * @sync: synchronize_rcu, synchronize_rcu_expedited, ... * * The list pointed to by @prev and @next can be RCU-read traversed * concurrently with this function. * * Note that this function blocks. * * Important note: the caller must take whatever action is necessary to prevent * any other updates to the existing list. In principle, it is possible to * modify the list as soon as sync() begins execution. If this sort of thing * becomes necessary, an alternative version based on call_rcu() could be * created. But only if -really- needed -- there is no shortage of RCU API * members. */ static inline void __list_splice_init_rcu(struct list_head *list, struct list_head *prev, struct list_head *next, void (*sync)(void)) { struct list_head *first = list->next; struct list_head *last = list->prev; /* * "first" and "last" tracking list, so initialize it. RCU readers * have access to this list, so we must use INIT_LIST_HEAD_RCU() * instead of INIT_LIST_HEAD(). */ INIT_LIST_HEAD_RCU(list); /* * At this point, the list body still points to the source list. * Wait for any readers to finish using the list before splicing * the list body into the new list. Any new readers will see * an empty list. */ sync(); ASSERT_EXCLUSIVE_ACCESS(*first); ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. * The order is important if the new list is global and accessible * to concurrent RCU readers. Note that RCU readers are not * permitted to traverse the prev pointers without excluding * this function. */ last->next = next; rcu_assign_pointer(list_next_rcu(prev), first); first->prev = prev; next->prev = last; } /** * list_splice_init_rcu - splice an RCU-protected list into an existing list, * designed for stacks. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... 
*/ static inline void list_splice_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head, head->next, sync); } /** * list_splice_tail_init_rcu - splice an RCU-protected list into an existing * list, designed for queues. * @list: the RCU-protected list to splice * @head: the place in the existing list to splice the first list into * @sync: synchronize_rcu, synchronize_rcu_expedited, ... */ static inline void list_splice_tail_init_rcu(struct list_head *list, struct list_head *head, void (*sync)(void)) { if (!list_empty(list)) __list_splice_init_rcu(list, head->prev, head, sync); } /** * list_entry_rcu - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_entry_rcu(ptr, type, member) \ container_of(READ_ONCE(ptr), type, member) /* * Where are list_empty_rcu() and list_first_entry_rcu()? * * They do not exist because they would lead to subtle race conditions: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * * The list might be non-empty when list_empty_rcu() checks it, but it * might have become empty by the time that list_first_entry_rcu() rereads * the ->next pointer, which would result in a SEGV. * * When not using RCU, it is OK for list_first_entry() to re-read that * pointer because both functions should be protected by some lock that * blocks writers. * * When using RCU, list_empty() uses READ_ONCE() to fetch the * RCU-protected ->next pointer and then compares it to the address of the * list head. However, it neither dereferences this pointer nor provides * this pointer to its caller. Thus, READ_ONCE() suffices (that is, * rcu_dereference() is not needed), which means that list_empty() can be * used anywhere you would want to use list_empty_rcu(). Just don't * expect anything useful to happen if you do a subsequent lockless * call to list_first_entry_rcu()!!! * * See list_first_or_null_rcu for an alternative. */ /** * list_first_or_null_rcu - get the first element from a list * @ptr: the list head to take the element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the list is empty, it returns NULL. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). */ #define list_first_or_null_rcu(ptr, type, member) \ ({ \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ }) /** * list_next_or_null_rcu - get the next element from a list * @head: the head for the list. * @ptr: the list head to take the next element from. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * Note that if the ptr is at the end of the list, NULL is returned. * * This primitive may safely run concurrently with the _rcu list-mutation * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 
*/ #define list_next_or_null_rcu(head, ptr, type, member) \ ({ \ struct list_head *__head = (head); \ struct list_head *__ptr = (ptr); \ struct list_head *__next = READ_ONCE(__ptr->next); \ likely(__next != __head) ? list_entry_rcu(__next, type, \ member) : NULL; \ }) /** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = list_entry_rcu((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_srcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. * @cond: lockdep expression for the lock required to traverse the list. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by srcu_read_lock(). * The lockdep expression srcu_read_lock_held() can be passed as the * cond argument from read side. */ #define list_for_each_entry_srcu(pos, head, member, cond) \ for (__list_check_srcu(cond), \ pos = list_entry_rcu((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_entry_lockless - get the struct for this entry * @ptr: the &struct list_head pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list_head within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted. */ #define list_entry_lockless(ptr, type, member) \ container_of((typeof(ptr))READ_ONCE(ptr), type, member) /** * list_for_each_entry_lockless - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_struct within the struct. * * This primitive may safely run concurrently with the _rcu * list-mutation primitives such as list_add_rcu(), but requires some * implicit RCU read-side guarding. One example is running within a special * exception-time environment where preemption is disabled and where lockdep * cannot be invoked. Another example is when items are added to the list, * but never deleted. */ #define list_for_each_entry_lockless(pos, head, member) \ for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_lockless(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_continue_rcu - continue iteration over list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. 
* * Continue to iterate over list of given type, continuing after * the current position which must have been in the list when the RCU read * lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_from_rcu() except * this starts after the given position and that one starts at the given * position. */ #define list_for_each_entry_continue_rcu(pos, head, member) \ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ &pos->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) /** * list_for_each_entry_from_rcu - iterate over a list from current point * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_node within the struct. * * Iterate over the tail of a list starting from a given position, * which must have been in the list when the RCU read lock was taken. * This would typically require either that you obtained the node from a * previous walk of the list in the same RCU read-side critical section, or * that you held some sort of non-RCU reference (such as a reference count) * to keep the node alive *and* in the list. * * This iterator is similar to list_for_each_entry_continue_rcu() except * this starts from the given position and that one starts from the position * after the given position. */ #define list_for_each_entry_from_rcu(pos, head, member) \ for (; &(pos)->member != (head); \ pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member)) /** * hlist_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: list_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry(). */ static inline void hlist_del_rcu(struct hlist_node *n) { __hlist_del(n); WRITE_ONCE(n->pprev, LIST_POISON2); } /** * hlist_replace_rcu - replace old entry by new one * @old : the element to be replaced * @new : the new element to insert * * The @old entry will be replaced with the @new entry atomically from * the perspective of concurrent readers. It is the caller's responsibility * to synchronize with concurrent updaters, if any. */ static inline void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) { struct hlist_node *next = old->next; new->next = next; WRITE_ONCE(new->pprev, old->pprev); rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); if (next) WRITE_ONCE(new->next->pprev, &new->next); WRITE_ONCE(old->pprev, LIST_POISON2); } /** * hlists_swap_heads_rcu - swap the lists the hlist heads point to * @left: The hlist head on the left * @right: The hlist head on the right * * The lists start out as [@left ][node1 ... ] and * [@right ][node2 ... ] * The lists end up as [@left ][node2 ... 
] * [@right ][node1 ... ] */ static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) { struct hlist_node *node1 = left->first; struct hlist_node *node2 = right->first; rcu_assign_pointer(left->first, node2); rcu_assign_pointer(right->first, node1); WRITE_ONCE(node2->pprev, &left->first); WRITE_ONCE(node1->pprev, &right->first); } /* * return the first or the next element in an RCU protected hlist */ #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) /** * hlist_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_head_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; n->next = first; WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_first_rcu(h), n); if (first) WRITE_ONCE(first->pprev, &n->next); } /** * hlist_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_tail_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; i; i = i->next) last = i; if (last) { n->next = last->next; WRITE_ONCE(n->pprev, &last->next); rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_add_head_rcu(n, h); } } /** * hlist_add_before_rcu * @n: the new element to add to the hash list. * @next: the existing element to add the new element before. * * Description: * Adds the specified element to the specified hlist * before the specified node while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. 
*/ static inline void hlist_add_before_rcu(struct hlist_node *n, struct hlist_node *next) { WRITE_ONCE(n->pprev, next->pprev); n->next = next; rcu_assign_pointer(hlist_pprev_rcu(n), n); WRITE_ONCE(next->pprev, &n->next); } /** * hlist_add_behind_rcu * @n: the new element to add to the hash list. * @prev: the existing element to add the new element after. * * Description: * Adds the specified element to the specified hlist * after the specified node while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. */ static inline void hlist_add_behind_rcu(struct hlist_node *n, struct hlist_node *prev) { n->next = prev->next; WRITE_ONCE(n->pprev, &prev->next); rcu_assign_pointer(hlist_next_rcu(prev), n); if (n->next) WRITE_ONCE(n->next->pprev, &n->next); } #define __hlist_for_each_rcu(pos, head) \ for (pos = rcu_dereference(hlist_first_rcu(head)); \ pos; \ pos = rcu_dereference(hlist_next_rcu(pos))) /** * hlist_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). */ #define hlist_for_each_entry_rcu(pos, head, member, cond...) \ for (__list_check_rcu(dummy, ## cond, 0), \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_srcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * @cond: lockdep expression for the lock required to traverse the list. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by srcu_read_lock(). * The lockdep expression srcu_read_lock_held() can be passed as the * cond argument from read side. */ #define hlist_for_each_entry_srcu(pos, head, member, cond) \ for (__list_check_srcu(cond), \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\ typeof(*(pos)), member); \ pos; \ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\ &(pos)->member)), typeof(*(pos)), member)) /** * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing) * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() * as long as the traversal is guarded by rcu_read_lock(). * * This is the same as hlist_for_each_entry_rcu() except that it does * not do any RCU debugging or tracing. 
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);				\
		pos;								\
		pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)				\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),	\
			typeof(*(pos)), member);				\
		pos;								\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)				\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member);		\
	     pos;								\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(		\
			&(pos)->member)), typeof(*(pos)), member);		\
	     pos;								\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(		\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from_rcu(pos, member)				\
	for (; pos;								\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))

#endif	/* __KERNEL__ */
#endif
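/*
 * Minimal usage sketch (illustration only, not part of <linux/rculist.h>):
 * readers traverse under rcu_read_lock() with list_for_each_entry_rcu(),
 * while updaters serialize against each other with their own spinlock and
 * publish/retract entries with list_add_rcu()/list_del_rcu(), deferring the
 * free with kfree_rcu(). The "foo" structure and helpers are made up for
 * this example.
 */
#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo {
	int key;
	struct list_head list;
	struct rcu_head rcu;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

/* Reader: safe against concurrent list_add_rcu()/list_del_rcu(). */
static bool foo_contains(int key)
{
	struct foo *f;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, list) {
		if (f->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* Updater: list_add_rcu() publishes the entry with rcu_assign_pointer(). */
static int foo_add(int key)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->key = key;

	spin_lock(&foo_lock);
	list_add_rcu(&f->list, &foo_list);
	spin_unlock(&foo_lock);
	return 0;
}

/* Updater: the entry may only be freed after a grace period has elapsed. */
static void foo_del(struct foo *f)
{
	spin_lock(&foo_lock);
	list_del_rcu(&f->list);
	spin_unlock(&foo_lock);
	kfree_rcu(f, rcu);
}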
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/alloc.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  HPFS bitmap operations
 */

#include "hpfs_fn.h"

static void hpfs_claim_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free != (unsigned)-1) {
		if (unlikely(!sbi->sb_n_free)) {
			hpfs_error(s, "free count underflow, allocating sector %08x", sec);
			sbi->sb_n_free = -1;
			return;
		}
		sbi->sb_n_free--;
	}
}

static void hpfs_claim_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free != (unsigned)-1) {
		if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
			hpfs_error(s, "free count overflow, freeing sector %08x", sec);
			sbi->sb_n_free = -1;
			return;
		}
		sbi->sb_n_free++;
	}
}

static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
		if (unlikely(!sbi->sb_n_free_dnodes)) {
			hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
			sbi->sb_n_free_dnodes = -1;
			return;
		}
		sbi->sb_n_free_dnodes--;
	}
}

static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
{
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	if (sbi->sb_n_free_dnodes != (unsigned)-1) {
		if (unlikely(sbi->sb_n_free_dnodes >=
sbi->sb_dirband_size / 4)) { hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec); sbi->sb_n_free_dnodes = -1; return; } sbi->sb_n_free_dnodes++; } } /* * Check if a sector is allocated in bitmap * This is really slow. Turned on only if chk==2 */ static int chk_if_allocated(struct super_block *s, secno sec, char *msg) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail; if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) { hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec); goto fail1; } hpfs_brelse4(&qbh); } return 0; fail1: hpfs_brelse4(&qbh); fail: return 1; } /* * Check if sector(s) have proper number and additionally check if they're * allocated in bitmap. */ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg) { if (start + len < start || start < 0x12 || start + len > hpfs_sb(s)->sb_fs_size) { hpfs_error(s, "sector(s) '%s' badly placed at %08x", msg, start); return 1; } if (hpfs_sb(s)->sb_chk>=2) { int i; for (i = 0; i < len; i++) if (chk_if_allocated(s, start + i, msg)) return 1; } return 0; } static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) { struct quad_buffer_head qbh; __le32 *bmp; unsigned bs = near & ~0x3fff; unsigned nr = (near & 0x3fff) & ~(n - 1); /*unsigned mnr;*/ unsigned i, q; int a, b; secno ret = 0; if (n != 1 && n != 4) { hpfs_error(s, "Bad allocation size: %d", n); return 0; } if (bs != ~0x3fff) { if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls; } else { if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto uls; } if (!tstbits(bmp, nr, n + forward)) { ret = bs + nr; goto rt; } q = nr + n; b = 0; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (!b) { if (q>>5 != nr>>5) { b = 1; q = nr & 0x1f; } } else if (q > nr) break; } if (!a) { ret = bs + q; goto rt; } nr >>= 5; /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */ i = nr; do { if (!le32_to_cpu(bmp[i])) goto cont; if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont; q = i<<5; if (i > 0) { unsigned k = le32_to_cpu(bmp[i-1]); while (k & 0x80000000) { q--; k <<= 1; } } if (n != 1) q = ((q-1)&~(n-1))+n; while ((a = tstbits(bmp, q, n + forward)) != 0) { q += a; if (n != 1) q = ((q-1)&~(n-1))+n; if (q>>5 > i) break; } if (!a) { ret = bs + q; goto rt; } cont: i++, i &= 0x1ff; } while (i != nr); rt: if (ret) { if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) { hpfs_error(s, "Allocation doesn't work! 
Wanted %d, allocated at %08x", n, ret); ret = 0; goto b; } bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); } b: hpfs_brelse4(&qbh); uls: return ret; } /* * Allocation strategy: 1) search place near the sector specified * 2) search bitmap where free sectors last found * 3) search all bitmaps * 4) search all bitmaps ignoring number of pre-allocated * sectors */ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward) { secno sec; int i; unsigned n_bmps; struct hpfs_sb_info *sbi = hpfs_sb(s); int f_p = 0; int near_bmp; if (forward < 0) { forward = -forward; f_p = 1; } n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14; if (near && near < sbi->sb_fs_size) { if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret; near_bmp = near >> 14; } else near_bmp = n_bmps / 2; /* if (b != -1) { if ((sec = alloc_in_bmp(s, b<<14, n, f_p ? forward : forward/2))) { b &= 0x0fffffff; goto ret; } if (b > 0x10000000) if ((sec = alloc_in_bmp(s, (b&0xfffffff)<<14, n, f_p ? forward : 0))) goto ret; */ if (!f_p) if (forward > sbi->sb_max_fwd_alloc) forward = sbi->sb_max_fwd_alloc; less_fwd: for (i = 0; i < n_bmps; i++) { if (near_bmp+i < n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i; goto ret; } if (!forward) { if (near_bmp-i-1 >= 0 && ((sec = alloc_in_bmp(s, (near_bmp-i-1) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp-i-1; goto ret; } } else { if (near_bmp+i >= n_bmps && ((sec = alloc_in_bmp(s, (near_bmp+i-n_bmps) << 14, n, forward)))) { sbi->sb_c_bitmap = near_bmp+i-n_bmps; goto ret; } } if (i == 1 && sbi->sb_c_bitmap != -1 && ((sec = alloc_in_bmp(s, (sbi->sb_c_bitmap) << 14, n, forward)))) { goto ret; } } if (!f_p) { if (forward) { sbi->sb_max_fwd_alloc = forward * 3 / 4; forward /= 2; goto less_fwd; } } sec = 0; ret: if (sec) { i = 0; do hpfs_claim_alloc(s, sec + i); while (unlikely(++i < n)); } if (sec && f_p) { for (i = 0; i < forward; i++) { if (!hpfs_alloc_if_possible(s, sec + n + i)) { hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i); sec = 0; break; } } } return sec; } static secno alloc_in_dirband(struct super_block *s, secno near) { unsigned nr = near; secno sec; struct hpfs_sb_info *sbi = hpfs_sb(s); if (nr < sbi->sb_dirband_start) nr = sbi->sb_dirband_start; if (nr >= sbi->sb_dirband_start + sbi->sb_dirband_size) nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4; nr -= sbi->sb_dirband_start; nr >>= 2; sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0); if (!sec) return 0; hpfs_claim_dirband_alloc(s, sec); return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start; } /* Alloc sector if it's free */ int hpfs_alloc_if_possible(struct super_block *s, secno sec) { struct quad_buffer_head qbh; __le32 *bmp; if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_alloc(s, sec); return 1; } hpfs_brelse4(&qbh); end: return 0; } /* Free sectors in bitmaps */ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) { struct quad_buffer_head qbh; __le32 *bmp; struct hpfs_sb_info *sbi = hpfs_sb(s); /*pr_info("2 - ");*/ if (!n) return; if (sec < 0x12) { hpfs_error(s, "Trying to free reserved sector %08x", sec); return; } sbi->sb_max_fwd_alloc += n > 0xffff ? 
0xffff : n; if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff; new_map: if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) { return; } new_tst: if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) { hpfs_error(s, "sector %08x not allocated", sec); hpfs_brelse4(&qbh); return; } bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f)); hpfs_claim_free(s, sec); if (!--n) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); return; } if (!(++sec & 0x3fff)) { hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); goto new_map; } goto new_tst; } /* * Check if there are at least n free dnodes on the filesystem. * Called before adding to dnode. If we run out of space while * splitting dnodes, it would corrupt dnode tree. */ int hpfs_check_free_dnodes(struct super_block *s, int n) { int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; int i, j; __le32 *bmp; struct quad_buffer_head qbh; if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { for (j = 0; j < 512; j++) { unsigned k; if (!le32_to_cpu(bmp[j])) continue; for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); i = 0; if (hpfs_sb(s)->sb_c_bitmap != -1) { bmp = hpfs_map_bitmap(s, b, &qbh, "chkdn1"); goto chk_bmp; } chk_next: if (i == b) i++; if (i >= n_bmps) return 1; bmp = hpfs_map_bitmap(s, i, &qbh, "chkdn2"); chk_bmp: if (bmp) { for (j = 0; j < 512; j++) { u32 k; if (!le32_to_cpu(bmp[j])) continue; for (k = 0xf; k; k <<= 4) if ((le32_to_cpu(bmp[j]) & k) == k) { if (!--n) { hpfs_brelse4(&qbh); return 0; } } } hpfs_brelse4(&qbh); } i++; goto chk_next; } void hpfs_free_dnode(struct super_block *s, dnode_secno dno) { if (hpfs_sb(s)->sb_chk) if (dno & 3) { hpfs_error(s, "hpfs_free_dnode: dnode %08x not aligned", dno); return; } if (dno < hpfs_sb(s)->sb_dirband_start || dno >= hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) { hpfs_free_sectors(s, dno, 4); } else { struct quad_buffer_head qbh; __le32 *bmp; unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { return; } bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f)); hpfs_mark_4buffers_dirty(&qbh); hpfs_brelse4(&qbh); hpfs_claim_dirband_free(s, dno); } } struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near, dnode_secno *dno, struct quad_buffer_head *qbh) { struct dnode *d; if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) { if (!(*dno = alloc_in_dirband(s, near))) if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL; } else { if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) if (!(*dno = alloc_in_dirband(s, near))) return NULL; } if (!(d = hpfs_get_4sectors(s, *dno, qbh))) { hpfs_free_dnode(s, *dno); return NULL; } memset(d, 0, 2048); d->magic = cpu_to_le32(DNODE_MAGIC); d->first_free = cpu_to_le32(52); d->dirent[0] = 32; d->dirent[2] = 8; d->dirent[30] = 1; d->dirent[31] = 255; d->self = cpu_to_le32(*dno); return d; } struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *fno, struct buffer_head **bh) { struct fnode *f; if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL; if (!(f = hpfs_get_sector(s, *fno, bh))) { hpfs_free_sectors(s, *fno, 1); return NULL; } memset(f, 0, 512); f->magic = cpu_to_le32(FNODE_MAGIC); f->ea_offs = cpu_to_le16(0xc4); f->btree.n_free_nodes = 8; f->btree.first_free = cpu_to_le16(8); return f; } struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *ano, struct buffer_head **bh) 
{ struct anode *a; if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL; if (!(a = hpfs_get_sector(s, *ano, bh))) { hpfs_free_sectors(s, *ano, 1); return NULL; } memset(a, 0, 512); a->magic = cpu_to_le32(ANODE_MAGIC); a->self = cpu_to_le32(*ano); a->btree.n_free_nodes = 40; a->btree.n_used_nodes = 0; a->btree.first_free = cpu_to_le16(8); return a; } static unsigned find_run(__le32 *bmp, unsigned *idx) { unsigned len; while (tstbits(bmp, *idx, 1)) { (*idx)++; if (unlikely(*idx >= 0x4000)) return 0; } len = 1; while (!tstbits(bmp, *idx + len, 1)) len++; return len; } static int do_trim(struct super_block *s, secno start, unsigned len, secno limit_start, secno limit_end, unsigned minlen, unsigned *result) { int err; secno end; if (fatal_signal_pending(current)) return -EINTR; end = start + len; if (start < limit_start) start = limit_start; if (end > limit_end) end = limit_end; if (start >= end) return 0; if (end - start < minlen) return 0; err = sb_issue_discard(s, start, end - start, GFP_NOFS, 0); if (err) return err; *result += end - start; return 0; } int hpfs_trim_fs(struct super_block *s, u64 start, u64 end, u64 minlen, unsigned *result) { int err = 0; struct hpfs_sb_info *sbi = hpfs_sb(s); unsigned idx, len, start_bmp, end_bmp; __le32 *bmp; struct quad_buffer_head qbh; *result = 0; if (!end || end > sbi->sb_fs_size) end = sbi->sb_fs_size; if (start >= sbi->sb_fs_size) return 0; if (minlen > 0x4000) return 0; if (start < sbi->sb_dirband_start + sbi->sb_dirband_size && end > sbi->sb_dirband_start) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_1; } if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { err = -EIO; goto unlock_1; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_1: hpfs_unlock(s); } start_bmp = start >> 14; end_bmp = (end + 0x3fff) >> 14; while (start_bmp < end_bmp && !err) { hpfs_lock(s); if (sb_rdonly(s)) { err = -EROFS; goto unlock_2; } if (!(bmp = hpfs_map_bitmap(s, start_bmp, &qbh, "trim"))) { err = -EIO; goto unlock_2; } idx = 0; while ((len = find_run(bmp, &idx)) && !err) { err = do_trim(s, (start_bmp << 14) + idx, len, start, end, minlen, result); idx += len; } hpfs_brelse4(&qbh); unlock_2: hpfs_unlock(s); start_bmp++; } return err; } |
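/*
 * Recap of the bitmap indexing used throughout the allocator above (an
 * illustrative sketch, not part of HPFS): each allocation bitmap covers
 * 0x4000 sectors, so a sector number splits into a bitmap number, a 32-bit
 * word inside the mapped bitmap, and a bit within that word. The helper
 * below only restates that arithmetic; its name is made up for the example.
 */
#if 0	/* example only, not built */
static void hpfs_example_locate_bit(secno sec, unsigned *bmp_no,
				    unsigned *word, unsigned *bit)
{
	*bmp_no = sec >> 14;		/* which allocation bitmap */
	*word   = (sec & 0x3fff) >> 5;	/* which __le32 word in that bitmap */
	*bit    = sec & 0x1f;		/* which bit inside the word */
}
#endif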
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ip_vs_proto.c: transport protocol load balancing support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * Changes:
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>

#include <net/ip_vs.h>

/*
 * IPVS protocols can only be registered/unregistered when the ipvs
 * module is loaded/unloaded, so no lock is needed in accessing the
 * ipvs protocol table.
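 *
 * For illustration: IP_VS_PROTO_HASH() below is just a mask with the table
 * size minus one, so IPPROTO_TCP (6) lands in slot 6 & 31 == 6 and
 * IPPROTO_UDP (17) in slot 17 & 31 == 17; protocols that collide are
 * chained through the ->next pointer of struct ip_vs_protocol.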
*/ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ #define IP_VS_PROTO_HASH(proto) ((proto) & (IP_VS_PROTO_TAB_SIZE-1)) static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; /* States for conn templates: NONE or words separated with ",", max 15 chars */ static const char *ip_vs_ctpl_state_name_table[IP_VS_CTPL_S_LAST] = { [IP_VS_CTPL_S_NONE] = "NONE", [IP_VS_CTPL_S_ASSURED] = "ASSURED", }; /* * register an ipvs protocol */ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) { unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp->next = ip_vs_proto_table[hash]; ip_vs_proto_table[hash] = pp; if (pp->init != NULL) pp->init(pp); return 0; } /* * register an ipvs protocols netns related data */ static int register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp) { unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); struct ip_vs_proto_data *pd = kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL); if (!pd) return -ENOMEM; pd->pp = pp; /* For speed issues */ pd->next = ipvs->proto_data_table[hash]; ipvs->proto_data_table[hash] = pd; atomic_set(&pd->appcnt, 0); /* Init app counter */ if (pp->init_netns != NULL) { int ret = pp->init_netns(ipvs, pd); if (ret) { /* unlink an free proto data */ ipvs->proto_data_table[hash] = pd->next; kfree(pd); return ret; } } return 0; } /* * unregister an ipvs protocol */ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) { struct ip_vs_protocol **pp_p; unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); pp_p = &ip_vs_proto_table[hash]; for (; *pp_p; pp_p = &(*pp_p)->next) { if (*pp_p == pp) { *pp_p = pp->next; if (pp->exit != NULL) pp->exit(pp); return 0; } } return -ESRCH; } /* * unregister an ipvs protocols netns data */ static int unregister_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd) { struct ip_vs_proto_data **pd_p; unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol); pd_p = &ipvs->proto_data_table[hash]; for (; *pd_p; pd_p = &(*pd_p)->next) { if (*pd_p == pd) { *pd_p = pd->next; if (pd->pp->exit_netns != NULL) pd->pp->exit_netns(ipvs, pd); kfree(pd); return 0; } } return -ESRCH; } /* * get ip_vs_protocol object by its proto. 
*/ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) { struct ip_vs_protocol *pp; unsigned int hash = IP_VS_PROTO_HASH(proto); for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { if (pp->protocol == proto) return pp; } return NULL; } EXPORT_SYMBOL(ip_vs_proto_get); /* * get ip_vs_protocol object data by netns and proto */ struct ip_vs_proto_data * ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) { struct ip_vs_proto_data *pd; unsigned int hash = IP_VS_PROTO_HASH(proto); for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { if (pd->pp->protocol == proto) return pd; } return NULL; } EXPORT_SYMBOL(ip_vs_proto_data_get); /* * Propagate event for state change to all protocols */ void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags) { struct ip_vs_proto_data *pd; int i; for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) { if (pd->pp->timeout_change) pd->pp->timeout_change(pd, flags); } } } int * ip_vs_create_timeout_table(int *table, int size) { return kmemdup(table, size, GFP_KERNEL); } const char *ip_vs_state_name(const struct ip_vs_conn *cp) { unsigned int state = cp->state; struct ip_vs_protocol *pp; if (cp->flags & IP_VS_CONN_F_TEMPLATE) { if (state >= IP_VS_CTPL_S_LAST) return "ERR!"; return ip_vs_ctpl_state_name_table[state] ? : "?"; } pp = ip_vs_proto_get(cp->protocol); if (pp == NULL || pp->state_name == NULL) return (cp->protocol == IPPROTO_IP) ? "NONE" : "ERR!"; return pp->state_name(state); } static void ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { char buf[128]; struct iphdr _iph, *ih; ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); if (ih == NULL) sprintf(buf, "TRUNCATED"); else if (ih->frag_off & htons(IP_OFFSET)) sprintf(buf, "%pI4->%pI4 frag", &ih->saddr, &ih->daddr); else { __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, offset + ih->ihl*4, sizeof(_ports), _ports); if (pptr == NULL) sprintf(buf, "TRUNCATED %pI4->%pI4", &ih->saddr, &ih->daddr); else sprintf(buf, "%pI4:%u->%pI4:%u", &ih->saddr, ntohs(pptr[0]), &ih->daddr, ntohs(pptr[1])); } pr_debug("%s: %s %s\n", msg, pp->name, buf); } #ifdef CONFIG_IP_VS_IPV6 static void ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { char buf[192]; struct ipv6hdr _iph, *ih; ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph); if (ih == NULL) sprintf(buf, "TRUNCATED"); else if (ih->nexthdr == IPPROTO_FRAGMENT) sprintf(buf, "%pI6c->%pI6c frag", &ih->saddr, &ih->daddr); else { __be16 _ports[2], *pptr; pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), sizeof(_ports), _ports); if (pptr == NULL) sprintf(buf, "TRUNCATED %pI6c->%pI6c", &ih->saddr, &ih->daddr); else sprintf(buf, "%pI6c:%u->%pI6c:%u", &ih->saddr, ntohs(pptr[0]), &ih->daddr, ntohs(pptr[1])); } pr_debug("%s: %s %s\n", msg, pp->name, buf); } #endif void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg); else #endif ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg); } /* * per network name-space init */ int __net_init ip_vs_protocol_net_init(struct netns_ipvs *ipvs) { int i, ret; static struct ip_vs_protocol *protos[] = { #ifdef CONFIG_IP_VS_PROTO_TCP &ip_vs_protocol_tcp, #endif #ifdef CONFIG_IP_VS_PROTO_UDP &ip_vs_protocol_udp, #endif #ifdef 
CONFIG_IP_VS_PROTO_SCTP &ip_vs_protocol_sctp, #endif #ifdef CONFIG_IP_VS_PROTO_AH &ip_vs_protocol_ah, #endif #ifdef CONFIG_IP_VS_PROTO_ESP &ip_vs_protocol_esp, #endif }; for (i = 0; i < ARRAY_SIZE(protos); i++) { ret = register_ip_vs_proto_netns(ipvs, protos[i]); if (ret < 0) goto cleanup; } return 0; cleanup: ip_vs_protocol_net_cleanup(ipvs); return ret; } void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs) { struct ip_vs_proto_data *pd; int i; /* unregister all the ipvs proto data for this netns */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pd = ipvs->proto_data_table[i]) != NULL) unregister_ip_vs_proto_netns(ipvs, pd); } } int __init ip_vs_protocol_init(void) { char protocols[64] = { 0 }; #define REGISTER_PROTOCOL(p) \ do { \ register_ip_vs_protocol(p); \ strcat(protocols, ", "); \ strcat(protocols, (p)->name); \ } while (0) #ifdef CONFIG_IP_VS_PROTO_TCP REGISTER_PROTOCOL(&ip_vs_protocol_tcp); #endif #ifdef CONFIG_IP_VS_PROTO_UDP REGISTER_PROTOCOL(&ip_vs_protocol_udp); #endif #ifdef CONFIG_IP_VS_PROTO_SCTP REGISTER_PROTOCOL(&ip_vs_protocol_sctp); #endif #ifdef CONFIG_IP_VS_PROTO_AH REGISTER_PROTOCOL(&ip_vs_protocol_ah); #endif #ifdef CONFIG_IP_VS_PROTO_ESP REGISTER_PROTOCOL(&ip_vs_protocol_esp); #endif pr_info("Registered protocols (%s)\n", &protocols[2]); return 0; } void ip_vs_protocol_cleanup(void) { struct ip_vs_protocol *pp; int i; /* unregister all the ipvs protocols */ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) { while ((pp = ip_vs_proto_table[i]) != NULL) unregister_ip_vs_protocol(pp); } } |
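/*
 * Illustrative sketch (not part of ip_vs_proto.c): how the lookup helpers
 * above are typically consumed. The function name below is made up for the
 * example; only ip_vs_proto_data_get() and the ->pp->name field come from
 * the code above.
 */
#if 0	/* example only, not built */
static const char *example_proto_name(struct netns_ipvs *ipvs,
				      unsigned short proto)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(ipvs, proto);

	/* NULL means this transport protocol is not handled by IPVS */
	return pd ? pd->pp->name : "unknown";
}
#endif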
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#ifndef __XFS_AG_RESV_H__
#define __XFS_AG_RESV_H__

void xfs_ag_resv_free(struct xfs_perag *pag);
int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);

bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
		enum xfs_ag_resv_type type);

void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
		struct xfs_alloc_arg *args);
void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
		struct xfs_trans *tp, xfs_extlen_t len);

static inline struct xfs_ag_resv *
xfs_perag_resv(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	switch (type) {
	case XFS_AG_RESV_METADATA:
		return &pag->pag_meta_resv;
	case XFS_AG_RESV_RMAPBT:
		return &pag->pag_rmapbt_resv;
	default:
		return NULL;
	}
}

#endif	/* __XFS_AG_RESV_H__ */
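/*
 * Illustrative sketch (not part of xfs_ag_resv.h): how a caller might use
 * xfs_perag_resv() to inspect the reservation selected by @type. The helper
 * name and the ar_reserved field it reads are assumptions made for this
 * example only.
 */
#if 0	/* example only, not built */
static inline bool
xfs_ag_resv_example_has_room(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type,
	xfs_extlen_t		want)
{
	struct xfs_ag_resv	*resv = xfs_perag_resv(pag, type);

	/* XFS_AG_RESV_NONE (and unknown types) carry no private reservation */
	if (!resv)
		return false;

	/* ar_reserved is assumed to hold the blocks still set aside */
	return resv->ar_reserved >= want;
}
#endif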
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2006, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * UBI input/output sub-system.
 *
 * This sub-system provides a uniform way to work with all kinds of the
 * underlying MTD devices. It also implements handy functions for reading and
 * writing UBI headers.
 *
 * We are trying to have a paranoid mindset and not to trust to what we read
 * from the flash media in order to be more secure and robust. So this
 * sub-system validates every single header it reads from the flash media.
 *
 * Some words about how the eraseblock headers are stored.
* * The erase counter header is always stored at offset zero. By default, the * VID header is stored after the EC header at the closest aligned offset * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID * header at the closest aligned offset. But this default layout may be * changed. For example, for different reasons (e.g., optimization) UBI may be * asked to put the VID header at further offset, and even at an unaligned * offset. Of course, if the offset of the VID header is unaligned, UBI adds * proper padding in front of it. Data offset may also be changed but it has to * be aligned. * * About minimal I/O units. In general, UBI assumes flash device model where * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the * @ubi->mtd->writesize field. But as an exception, UBI admits use of another * (smaller) minimal I/O unit size for EC and VID headers to make it possible * to do different optimizations. * * This is extremely useful in case of NAND flashes which admit of several * write operations to one NAND page. In this case UBI can fit EC and VID * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI * users. * * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID * headers. * * Q: why not just to treat sub-page as a minimal I/O unit of this flash * device, e.g., make @ubi->min_io_size = 512 in the example above? * * A: because when writing a sub-page, MTD still writes a full 2K page but the * bytes which are not relevant to the sub-page are 0xFF. So, basically, * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page. * Thus, we prefer to use sub-pages only for EC and VID headers. * * As it was noted above, the VID header may start at a non-aligned offset. * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page, * the VID header may reside at offset 1984 which is the last 64 bytes of the * last sub-page (EC header is always at offset zero). This causes some * difficulties when reading and writing VID headers. * * Suppose we have a 64-byte buffer and we read a VID header at it. We change * the data and want to write this VID header out. As we can only write in * 512-byte chunks, we have to allocate one more buffer and copy our VID header * to offset 448 of this buffer. * * The I/O sub-system does the following trick in order to avoid this extra * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. * When the VID header is being written out, it shifts the VID header pointer * back and writes the whole sub-page. 
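 *
 * A worked example of that trick (numbers taken from the 2KiB-page /
 * 512-byte sub-page case above): with vid_hdr_offset = 1984, the aligned
 * start of the containing sub-page is vid_hdr_aloffset = 1536 (3 * 512),
 * the header sits vid_hdr_shift = 1984 - 1536 = 448 bytes into the
 * allocated buffer, and the whole vid_hdr_alsize = 512 byte sub-page is
 * what actually gets written to the flash.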
*/ #include <linux/crc32.h> #include <linux/err.h> #include <linux/slab.h> #include "ubi.h" static int self_check_not_bad(const struct ubi_device *ubi, int pnum); static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_ec_hdr *ec_hdr); static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr); static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len); /** * ubi_io_read - read data from a physical eraseblock. * @ubi: UBI device description object * @buf: buffer where to store the read data * @pnum: physical eraseblock number to read from * @offset: offset within the physical eraseblock from where to read * @len: how many bytes to read * * This function reads data from offset @offset of physical eraseblock @pnum * and stores the read data in the @buf buffer. The following return codes are * possible: * * o %0 if all the requested data were successfully read; * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but * correctable bit-flips were detected; this is harmless but may indicate * that this eraseblock may become bad soon (but do not have to); * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for * example it can be an ECC error in case of NAND; this most probably means * that the data is corrupted; * o %-EIO if some I/O error occurred; * o other negative error codes in case of other errors. */ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, int len) { int err, retries = 0; size_t read; loff_t addr; dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); ubi_assert(len > 0); err = self_check_not_bad(ubi, pnum); if (err) return err; /* * Deliberately corrupt the buffer to improve robustness. Indeed, if we * do not do this, the following may happen: * 1. The buffer contains data from previous operation, e.g., read from * another PEB previously. The data looks like expected, e.g., if we * just do not read anything and return - the caller would not * notice this. E.g., if we are reading a VID header, the buffer may * contain a valid VID header from another PEB. * 2. The driver is buggy and returns us success or -EBADMSG or * -EUCLEAN, but it does not actually put any data to the buffer. * * This may confuse UBI or upper layers - they may think the buffer * contains valid data while in fact it is just old data. This is * especially possible because UBI (and UBIFS) relies on CRC, and * treats data as correct even in case of ECC errors if the CRC is * correct. * * Try to prevent this situation by changing the first byte of the * buffer. */ *((uint8_t *)buf) ^= 0xFF; addr = (loff_t)pnum * ubi->peb_size + offset; retry: err = mtd_read(ubi->mtd, addr, len, &read, buf); if (err) { const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : ""; if (mtd_is_bitflip(err)) { /* * -EUCLEAN is reported if there was a bit-flip which * was corrected, so this is harmless. * * We do not report about it here unless debugging is * enabled. A corresponding message will be printed * later, when it is has been scrubbed. 
*/ ubi_msg(ubi, "fixable bit-flip detected at PEB %d", pnum); ubi_assert(len == read); return UBI_IO_BITFLIPS; } if (retries++ < UBI_IO_RETRIES) { ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry", err, errstr, len, pnum, offset, read); yield(); goto retry; } ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes", err, errstr, len, pnum, offset, read); dump_stack(); /* * The driver should never return -EBADMSG if it failed to read * all the requested data. But some buggy drivers might do * this, so we change it to -EIO. */ if (read != len && mtd_is_eccerr(err)) { ubi_assert(0); err = -EIO; } } else { ubi_assert(len == read); if (ubi_dbg_is_bitflip(ubi)) { dbg_gen("bit-flip (emulated)"); return UBI_IO_BITFLIPS; } if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE)) { ubi_warn(ubi, "cannot read %d bytes from PEB %d:%d (emulated)", len, pnum, offset); return -EIO; } if (ubi_dbg_is_eccerr(ubi)) { ubi_warn(ubi, "ECC error (emulated) while reading %d bytes from PEB %d:%d, read %zd bytes", len, pnum, offset, read); return -EBADMSG; } } return err; } /** * ubi_io_write - write data to a physical eraseblock. * @ubi: UBI device description object * @buf: buffer with the data to write * @pnum: physical eraseblock number to write to * @offset: offset within the physical eraseblock where to write * @len: how many bytes to write * * This function writes @len bytes of data from buffer @buf to offset @offset * of physical eraseblock @pnum. If all the data were successfully written, * zero is returned. If an error occurred, this function returns a negative * error code. If %-EIO is returned, the physical eraseblock most probably went * bad. * * Note, in case of an error, it is possible that something was still written * to the flash media, but may be some garbage. */ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len) { int err; size_t written; loff_t addr; dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); ubi_assert(offset >= 0 && offset + len <= ubi->peb_size); ubi_assert(offset % ubi->hdrs_min_io_size == 0); ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0); if (ubi->ro_mode) { ubi_err(ubi, "read-only mode"); return -EROFS; } err = self_check_not_bad(ubi, pnum); if (err) return err; /* The area we are writing to has to contain all 0xFF bytes */ err = ubi_self_check_all_ff(ubi, pnum, offset, len); if (err) return err; if (offset >= ubi->leb_start) { /* * We write to the data area of the physical eraseblock. Make * sure it has valid EC and VID headers. */ err = self_check_peb_ec_hdr(ubi, pnum); if (err) return err; err = self_check_peb_vid_hdr(ubi, pnum); if (err) return err; } if (ubi_dbg_is_write_failure(ubi)) { ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)", len, pnum, offset); dump_stack(); return -EIO; } addr = (loff_t)pnum * ubi->peb_size + offset; err = mtd_write(ubi->mtd, addr, len, &written, buf); if (err) { ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes", err, len, pnum, offset, written); dump_stack(); ubi_dump_flash(ubi, pnum, offset, len); } else ubi_assert(written == len); if (!err) { err = self_check_write(ubi, buf, pnum, offset, len); if (err) return err; /* * Since we always write sequentially, the rest of the PEB has * to contain only 0xFF bytes. 
*/ offset += len; len = ubi->peb_size - offset; if (len) err = ubi_self_check_all_ff(ubi, pnum, offset, len); } return err; } /** * do_sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object * @pnum: the physical eraseblock number to erase * * This function synchronously erases physical eraseblock @pnum and returns * zero in case of success and a negative error code in case of failure. If * %-EIO is returned, the physical eraseblock most probably went bad. */ static int do_sync_erase(struct ubi_device *ubi, int pnum) { int err, retries = 0; struct erase_info ei; dbg_io("erase PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); if (ubi->ro_mode) { ubi_err(ubi, "read-only mode"); return -EROFS; } retry: memset(&ei, 0, sizeof(struct erase_info)); ei.addr = (loff_t)pnum * ubi->peb_size; ei.len = ubi->peb_size; err = mtd_erase(ubi->mtd, &ei); if (err) { if (retries++ < UBI_IO_RETRIES) { ubi_warn(ubi, "error %d while erasing PEB %d, retry", err, pnum); yield(); goto retry; } ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err); dump_stack(); return err; } err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size); if (err) return err; if (ubi_dbg_is_erase_failure(ubi)) { ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum); return -EIO; } return 0; } /* Patterns to write to a physical eraseblock when torturing it */ static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; /** * torture_peb - test a supposedly bad physical eraseblock. * @ubi: UBI device description object * @pnum: the physical eraseblock number to test * * This function returns %-EIO if the physical eraseblock did not pass the * test, a positive number of erase operations done if the test was * successfully passed, and other negative error codes in case of other errors. */ static int torture_peb(struct ubi_device *ubi, int pnum) { int err, i, patt_count; ubi_msg(ubi, "run torture test for PEB %d", pnum); patt_count = ARRAY_SIZE(patterns); ubi_assert(patt_count > 0); mutex_lock(&ubi->buf_mutex); for (i = 0; i < patt_count; i++) { err = do_sync_erase(ubi, pnum); if (err) goto out; /* Make sure the PEB contains only 0xFF bytes */ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); if (err) goto out; err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size); if (err == 0) { ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found", pnum); err = -EIO; goto out; } /* Write a pattern and check it */ memset(ubi->peb_buf, patterns[i], ubi->peb_size); err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); if (err) goto out; memset(ubi->peb_buf, ~patterns[i], ubi->peb_size); err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); if (err) goto out; err = ubi_check_pattern(ubi->peb_buf, patterns[i], ubi->peb_size); if (err == 0) { ubi_err(ubi, "pattern %x checking failed for PEB %d", patterns[i], pnum); err = -EIO; goto out; } } err = patt_count; ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum); out: mutex_unlock(&ubi->buf_mutex); if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { /* * If a bit-flip or data integrity error was detected, the test * has not passed because it happened on a freshly erased * physical eraseblock which means something is wrong with it. */ ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad", pnum); err = -EIO; } return err; } /** * nor_erase_prepare - prepare a NOR flash PEB for erasure. 
* @ubi: UBI device description object * @pnum: physical eraseblock number to prepare * * NOR flash, or at least some of them, have peculiar embedded PEB erasure * algorithm: the PEB is first filled with zeroes, then it is erased. And * filling with zeroes starts from the end of the PEB. This was observed with * Spansion S29GL512N NOR flash. * * This means that in case of a power cut we may end up with intact data at the * beginning of the PEB, and all zeroes at the end of PEB. In other words, the * EC and VID headers are OK, but a large chunk of data at the end of PEB is * zeroed. This makes UBI mistakenly treat this PEB as used and associate it * with an LEB, which leads to subsequent failures (e.g., UBIFS fails). * * This function is called before erasing NOR PEBs and it zeroes out EC and VID * magic numbers in order to invalidate them and prevent the failures. Returns * zero in case of success and a negative error code in case of failure. */ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) { int err; size_t written; loff_t addr; uint32_t data = 0; struct ubi_ec_hdr ec_hdr; struct ubi_vid_io_buf vidb; /* * Note, we cannot generally define VID header buffers on stack, * because of the way we deal with these buffers (see the header * comment in this file). But we know this is a NOR-specific piece of * code, so we can do this. But yes, this is error-prone and we should * (pre-)allocate VID header buffer instead. */ struct ubi_vid_hdr vid_hdr; /* * If VID or EC is valid, we have to corrupt them before erasing. * It is important to first invalidate the EC header, and then the VID * header. Otherwise a power cut may lead to valid EC header and * invalid VID header, in which case UBI will treat this PEB as * corrupted and will try to preserve it, and print scary warnings. */ addr = (loff_t)pnum * ubi->peb_size; err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && err != UBI_IO_FF){ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); if(err) goto error; } ubi_init_vid_buf(ubi, &vidb, &vid_hdr); ubi_assert(&vid_hdr == ubi_get_vid_hdr(&vidb)); err = ubi_io_read_vid_hdr(ubi, pnum, &vidb, 0); if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR && err != UBI_IO_FF){ addr += ubi->vid_hdr_aloffset; err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data); if (err) goto error; } return 0; error: /* * The PEB contains a valid VID or EC header, but we cannot invalidate * it. Supposedly the flash media or the driver is screwed up, so * return an error. */ ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err); ubi_dump_flash(ubi, pnum, 0, ubi->peb_size); return -EIO; } /** * ubi_io_sync_erase - synchronously erase a physical eraseblock. * @ubi: UBI device description object * @pnum: physical eraseblock number to erase * @torture: if this physical eraseblock has to be tortured * * This function synchronously erases physical eraseblock @pnum. If @torture * flag is not zero, the physical eraseblock is checked by means of writing * different patterns to it and reading them back. If the torturing is enabled, * the physical eraseblock is erased more than once. * * This function returns the number of erasures made in case of success, %-EIO * if the erasure failed or the torturing test failed, and other negative error * codes in case of other errors. Note, %-EIO means that the physical * eraseblock is bad. 
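 *
 * For example, with the 3-entry pattern table defined above, a plain
 * erasure returns 1, while a successful torture run performs the three
 * pattern write/erase cycles plus the final erasure and returns 4.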
*/ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture) { int err, ret = 0; ubi_assert(pnum >= 0 && pnum < ubi->peb_count); err = self_check_not_bad(ubi, pnum); if (err != 0) return err; if (ubi->ro_mode) { ubi_err(ubi, "read-only mode"); return -EROFS; } /* * If the flash is ECC-ed then we have to erase the ECC block before we * can write to it. But the write is in preparation to an erase in the * first place. This means we cannot zero out EC and VID before the * erase and we just have to hope the flash starts erasing from the * start of the page. */ if (ubi->nor_flash && ubi->mtd->writesize == 1) { err = nor_erase_prepare(ubi, pnum); if (err) return err; } if (torture) { ret = torture_peb(ubi, pnum); if (ret < 0) return ret; } err = do_sync_erase(ubi, pnum); if (err) return err; return ret + 1; } /** * ubi_io_is_bad - check if a physical eraseblock is bad. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * * This function returns a positive number if the physical eraseblock is bad, * zero if not, and a negative error code if an error occurred. */ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum) { struct mtd_info *mtd = ubi->mtd; ubi_assert(pnum >= 0 && pnum < ubi->peb_count); if (ubi->bad_allowed) { int ret; ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); if (ret < 0) ubi_err(ubi, "error %d while checking if PEB %d is bad", ret, pnum); else if (ret) dbg_io("PEB %d is bad", pnum); return ret; } return 0; } /** * ubi_io_mark_bad - mark a physical eraseblock as bad. * @ubi: UBI device description object * @pnum: the physical eraseblock number to mark * * This function returns zero in case of success and a negative error code in * case of failure. */ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum) { int err; struct mtd_info *mtd = ubi->mtd; ubi_assert(pnum >= 0 && pnum < ubi->peb_count); if (ubi->ro_mode) { ubi_err(ubi, "read-only mode"); return -EROFS; } if (!ubi->bad_allowed) return 0; err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); if (err) ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err); return err; } /** * validate_ec_hdr - validate an erase counter header. * @ubi: UBI device description object * @ec_hdr: the erase counter header to check * * This function returns zero if the erase counter header is OK, and %1 if * not. */ static int validate_ec_hdr(const struct ubi_device *ubi, const struct ubi_ec_hdr *ec_hdr) { long long ec; int vid_hdr_offset, leb_start; ec = be64_to_cpu(ec_hdr->ec); vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset); leb_start = be32_to_cpu(ec_hdr->data_offset); if (ec_hdr->version != UBI_VERSION) { ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d", UBI_VERSION, (int)ec_hdr->version); goto bad; } if (vid_hdr_offset != ubi->vid_hdr_offset) { ubi_err(ubi, "bad VID header offset %d, expected %d", vid_hdr_offset, ubi->vid_hdr_offset); goto bad; } if (leb_start != ubi->leb_start) { ubi_err(ubi, "bad data offset %d, expected %d", leb_start, ubi->leb_start); goto bad; } if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { ubi_err(ubi, "bad erase counter %lld", ec); goto bad; } return 0; bad: ubi_err(ubi, "bad EC header"); ubi_dump_ec_hdr(ec_hdr); dump_stack(); return 1; } /** * ubi_io_read_ec_hdr - read and check an erase counter header. 
* @ubi: UBI device description object * @pnum: physical eraseblock to read from * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter * header * @verbose: be verbose if the header is corrupted or was not found * * This function reads erase counter header from physical eraseblock @pnum and * stores it in @ec_hdr. This function also checks CRC checksum of the read * erase counter header. The following codes may be returned: * * o %0 if the CRC checksum is correct and the header was successfully read; * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected * and corrected by the flash driver; this is harmless but may indicate that * this eraseblock may become bad soon (but may be not); * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error); * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was * a data integrity error (uncorrectable ECC error in case of NAND); * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty) * o a negative error code in case of failure. */ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, struct ubi_ec_hdr *ec_hdr, int verbose) { int err, read_err; uint32_t crc, magic, hdr_crc; dbg_io("read EC header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); if (read_err) { if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) return read_err; /* * We read all the data, but either a correctable bit-flip * occurred, or MTD reported a data integrity error * (uncorrectable ECC error in case of NAND). The former is * harmless, the later may mean that the read data is * corrupted. But we have a CRC check-sum and we will detect * this. If the EC header is still OK, we just report this as * there was a bit-flip, to force scrubbing. */ } magic = be32_to_cpu(ec_hdr->magic); if (magic != UBI_EC_HDR_MAGIC) { if (mtd_is_eccerr(read_err)) return UBI_IO_BAD_HDR_EBADMSG; /* * The magic field is wrong. Let's check if we have read all * 0xFF. If yes, this physical eraseblock is assumed to be * empty. */ if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { /* The physical eraseblock is supposedly empty */ if (verbose) ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes", pnum); dbg_bld("no EC header found at PEB %d, only 0xFF bytes", pnum); if (!read_err) return UBI_IO_FF; else return UBI_IO_FF_BITFLIPS; } /* * This is not a valid erase counter header, and these are not * 0xFF bytes. Report that the header is corrupted. */ if (verbose) { ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_EC_HDR_MAGIC); ubi_dump_ec_hdr(ec_hdr); } dbg_bld("bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_EC_HDR_MAGIC); return UBI_IO_BAD_HDR; } crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); if (hdr_crc != crc) { if (verbose) { ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); ubi_dump_ec_hdr(ec_hdr); } dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); if (!read_err) return UBI_IO_BAD_HDR; else return UBI_IO_BAD_HDR_EBADMSG; } /* And of course validate what has just been read from the media */ err = validate_ec_hdr(ubi, ec_hdr); if (err) { ubi_err(ubi, "validation failed for PEB %d", pnum); return -EINVAL; } /* * If there was %-EBADMSG, but the header CRC is still OK, report about * a bit-flip to force scrubbing on this PEB. 
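 *
 * (At this point @read_err can only be zero, %UBI_IO_BITFLIPS, or an ECC
 * error whose data nevertheless passed the CRC check, so the non-zero
 * cases are safely reported as bit-flips.)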
*/ if (read_err) return UBI_IO_BITFLIPS; if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_EC)) { ubi_warn(ubi, "cannot read EC header from PEB %d (emulated)", pnum); return -EIO; } if (ubi_dbg_is_ff(ubi, MASK_IO_FF_EC)) { ubi_warn(ubi, "bit-all-ff (emulated)"); return UBI_IO_FF; } if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_EC)) { ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)"); return UBI_IO_FF_BITFLIPS; } if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_EC)) { ubi_warn(ubi, "bad_hdr (emulated)"); return UBI_IO_BAD_HDR; } if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_EC)) { ubi_warn(ubi, "bad_hdr with ECC error (emulated)"); return UBI_IO_BAD_HDR_EBADMSG; } return 0; } /** * ubi_io_write_ec_hdr - write an erase counter header. * @ubi: UBI device description object * @pnum: physical eraseblock to write to * @ec_hdr: the erase counter header to write * * This function writes erase counter header described by @ec_hdr to physical * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so * the caller do not have to fill them. Callers must only fill the @ec_hdr->ec * field. * * This function returns zero in case of success and a negative error code in * case of failure. If %-EIO is returned, the physical eraseblock most probably * went bad. */ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, struct ubi_ec_hdr *ec_hdr) { int err; uint32_t crc; dbg_io("write EC header to PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC); ec_hdr->version = UBI_VERSION; ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); ec_hdr->image_seq = cpu_to_be32(ubi->image_seq); crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); ec_hdr->hdr_crc = cpu_to_be32(crc); err = self_check_ec_hdr(ubi, pnum, ec_hdr); if (err) return err; if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_EC)) { ubi_warn(ubi, "emulating a power cut when writing EC header"); ubi_ro_mode(ubi); return -EROFS; } err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); return err; } /** * validate_vid_hdr - validate a volume identifier header. * @ubi: UBI device description object * @vid_hdr: the volume identifier header to check * * This function checks that data stored in the volume identifier header * @vid_hdr. Returns zero if the VID header is OK and %1 if not. 
*/ static int validate_vid_hdr(const struct ubi_device *ubi, const struct ubi_vid_hdr *vid_hdr) { int vol_type = vid_hdr->vol_type; int copy_flag = vid_hdr->copy_flag; int vol_id = be32_to_cpu(vid_hdr->vol_id); int lnum = be32_to_cpu(vid_hdr->lnum); int compat = vid_hdr->compat; int data_size = be32_to_cpu(vid_hdr->data_size); int used_ebs = be32_to_cpu(vid_hdr->used_ebs); int data_pad = be32_to_cpu(vid_hdr->data_pad); int data_crc = be32_to_cpu(vid_hdr->data_crc); int usable_leb_size = ubi->leb_size - data_pad; if (copy_flag != 0 && copy_flag != 1) { ubi_err(ubi, "bad copy_flag"); goto bad; } if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || data_pad < 0) { ubi_err(ubi, "negative values"); goto bad; } if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { ubi_err(ubi, "bad vol_id"); goto bad; } if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { ubi_err(ubi, "bad compat"); goto bad; } if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && compat != UBI_COMPAT_REJECT) { ubi_err(ubi, "bad compat"); goto bad; } if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { ubi_err(ubi, "bad vol_type"); goto bad; } if (data_pad >= ubi->leb_size / 2) { ubi_err(ubi, "bad data_pad"); goto bad; } if (data_size > ubi->leb_size) { ubi_err(ubi, "bad data_size"); goto bad; } if (vol_type == UBI_VID_STATIC) { /* * Although from high-level point of view static volumes may * contain zero bytes of data, but no VID headers can contain * zero at these fields, because they empty volumes do not have * mapped logical eraseblocks. */ if (used_ebs == 0) { ubi_err(ubi, "zero used_ebs"); goto bad; } if (data_size == 0) { ubi_err(ubi, "zero data_size"); goto bad; } if (lnum < used_ebs - 1) { if (data_size != usable_leb_size) { ubi_err(ubi, "bad data_size"); goto bad; } } else if (lnum > used_ebs - 1) { ubi_err(ubi, "too high lnum"); goto bad; } } else { if (copy_flag == 0) { if (data_crc != 0) { ubi_err(ubi, "non-zero data CRC"); goto bad; } if (data_size != 0) { ubi_err(ubi, "non-zero data_size"); goto bad; } } else { if (data_size == 0) { ubi_err(ubi, "zero data_size of copy"); goto bad; } } if (used_ebs != 0) { ubi_err(ubi, "bad used_ebs"); goto bad; } } return 0; bad: ubi_err(ubi, "bad VID header"); ubi_dump_vid_hdr(vid_hdr); dump_stack(); return 1; } /** * ubi_io_read_vid_hdr - read and check a volume identifier header. * @ubi: UBI device description object * @pnum: physical eraseblock number to read from * @vidb: the volume identifier buffer to store data in * @verbose: be verbose if the header is corrupted or wasn't found * * This function reads the volume identifier header from physical eraseblock * @pnum and stores it in @vidb. It also checks CRC checksum of the read * volume identifier header. The error codes are the same as in * 'ubi_io_read_ec_hdr()'. * * Note, the implementation of this function is also very similar to * 'ubi_io_read_ec_hdr()', so refer commentaries in 'ubi_io_read_ec_hdr()'. 
*/ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_io_buf *vidb, int verbose) { int err, read_err; uint32_t crc, magic, hdr_crc; struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); void *p = vidb->buffer; dbg_io("read VID header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_shift + UBI_VID_HDR_SIZE); if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) return read_err; magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { if (mtd_is_eccerr(read_err)) return UBI_IO_BAD_HDR_EBADMSG; if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { if (verbose) ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes", pnum); dbg_bld("no VID header found at PEB %d, only 0xFF bytes", pnum); if (!read_err) return UBI_IO_FF; else return UBI_IO_FF_BITFLIPS; } if (verbose) { ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_VID_HDR_MAGIC); ubi_dump_vid_hdr(vid_hdr); } dbg_bld("bad magic number at PEB %d: %08x instead of %08x", pnum, magic, UBI_VID_HDR_MAGIC); return UBI_IO_BAD_HDR; } crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { if (verbose) { ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); ubi_dump_vid_hdr(vid_hdr); } dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); if (!read_err) return UBI_IO_BAD_HDR; else return UBI_IO_BAD_HDR_EBADMSG; } err = validate_vid_hdr(ubi, vid_hdr); if (err) { ubi_err(ubi, "validation failed for PEB %d", pnum); return -EINVAL; } if (read_err) return UBI_IO_BITFLIPS; if (ubi_dbg_is_read_failure(ubi, MASK_READ_FAILURE_VID)) { ubi_warn(ubi, "cannot read VID header from PEB %d (emulated)", pnum); return -EIO; } if (ubi_dbg_is_ff(ubi, MASK_IO_FF_VID)) { ubi_warn(ubi, "bit-all-ff (emulated)"); return UBI_IO_FF; } if (ubi_dbg_is_ff_bitflips(ubi, MASK_IO_FF_BITFLIPS_VID)) { ubi_warn(ubi, "bit-all-ff with error reported by MTD driver (emulated)"); return UBI_IO_FF_BITFLIPS; } if (ubi_dbg_is_bad_hdr(ubi, MASK_BAD_HDR_VID)) { ubi_warn(ubi, "bad_hdr (emulated)"); return UBI_IO_BAD_HDR; } if (ubi_dbg_is_bad_hdr_ebadmsg(ubi, MASK_BAD_HDR_EBADMSG_VID)) { ubi_warn(ubi, "bad_hdr with ECC error (emulated)"); return UBI_IO_BAD_HDR_EBADMSG; } return 0; } /** * ubi_io_write_vid_hdr - write a volume identifier header. * @ubi: UBI device description object * @pnum: the physical eraseblock number to write to * @vidb: the volume identifier buffer to write * * This function writes the volume identifier header described by @vid_hdr to * physical eraseblock @pnum. This function automatically fills the * @vidb->hdr->magic and the @vidb->hdr->version fields, as well as calculates * header CRC checksum and stores it at vidb->hdr->hdr_crc. * * This function returns zero in case of success and a negative error code in * case of failure. If %-EIO is returned, the physical eraseblock probably went * bad. 
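 *
 * Note that, as described in the file header, the write below covers the
 * whole @ubi->vid_hdr_alsize region starting at @ubi->vid_hdr_aloffset,
 * with the header placed @ubi->vid_hdr_shift bytes into the buffer.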
*/ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_io_buf *vidb) { struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb); int err; uint32_t crc; void *p = vidb->buffer; dbg_io("write VID header to PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); err = self_check_peb_ec_hdr(ubi, pnum); if (err) return err; vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); vid_hdr->version = UBI_VERSION; crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); vid_hdr->hdr_crc = cpu_to_be32(crc); err = self_check_vid_hdr(ubi, pnum, vid_hdr); if (err) return err; if (ubi_dbg_is_power_cut(ubi, MASK_POWER_CUT_VID)) { ubi_warn(ubi, "emulating a power cut when writing VID header"); ubi_ro_mode(ubi); return -EROFS; } err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_alsize); return err; } /** * self_check_not_bad - ensure that a physical eraseblock is not bad. * @ubi: UBI device description object * @pnum: physical eraseblock number to check * * This function returns zero if the physical eraseblock is good, %-EINVAL if * it is bad and a negative error code if an error occurred. */ static int self_check_not_bad(const struct ubi_device *ubi, int pnum) { int err; if (!ubi_dbg_chk_io(ubi)) return 0; err = ubi_io_is_bad(ubi, pnum); if (!err) return err; ubi_err(ubi, "self-check failed for PEB %d", pnum); dump_stack(); return err > 0 ? -EINVAL : err; } /** * self_check_ec_hdr - check if an erase counter header is all right. * @ubi: UBI device description object * @pnum: physical eraseblock number the erase counter header belongs to * @ec_hdr: the erase counter header to check * * This function returns zero if the erase counter header contains valid * values, and %-EINVAL if not. */ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_ec_hdr *ec_hdr) { int err; uint32_t magic; if (!ubi_dbg_chk_io(ubi)) return 0; magic = be32_to_cpu(ec_hdr->magic); if (magic != UBI_EC_HDR_MAGIC) { ubi_err(ubi, "bad magic %#08x, must be %#08x", magic, UBI_EC_HDR_MAGIC); goto fail; } err = validate_ec_hdr(ubi, ec_hdr); if (err) { ubi_err(ubi, "self-check failed for PEB %d", pnum); goto fail; } return 0; fail: ubi_dump_ec_hdr(ec_hdr); dump_stack(); return -EINVAL; } /** * self_check_peb_ec_hdr - check erase counter header. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * * This function returns zero if the erase counter header is all right and * a negative error code if not or if an error occurred. */ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) { int err; uint32_t crc, hdr_crc; struct ubi_ec_hdr *ec_hdr; if (!ubi_dbg_chk_io(ubi)) return 0; ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto exit; crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); if (hdr_crc != crc) { ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_ec_hdr(ec_hdr); dump_stack(); err = -EINVAL; goto exit; } err = self_check_ec_hdr(ubi, pnum, ec_hdr); exit: kfree(ec_hdr); return err; } /** * self_check_vid_hdr - check that a volume identifier header is all right. 
* @ubi: UBI device description object * @pnum: physical eraseblock number the volume identifier header belongs to * @vid_hdr: the volume identifier header to check * * This function returns zero if the volume identifier header is all right, and * %-EINVAL if not. */ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum, const struct ubi_vid_hdr *vid_hdr) { int err; uint32_t magic; if (!ubi_dbg_chk_io(ubi)) return 0; magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x", magic, pnum, UBI_VID_HDR_MAGIC); goto fail; } err = validate_vid_hdr(ubi, vid_hdr); if (err) { ubi_err(ubi, "self-check failed for PEB %d", pnum); goto fail; } return err; fail: ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); dump_stack(); return -EINVAL; } /** * self_check_peb_vid_hdr - check volume identifier header. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * * This function returns zero if the volume identifier header is all right, * and a negative error code if not or if an error occurred. */ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) { int err; uint32_t crc, hdr_crc; struct ubi_vid_io_buf *vidb; struct ubi_vid_hdr *vid_hdr; void *p; if (!ubi_dbg_chk_io(ubi)) return 0; vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS); if (!vidb) return -ENOMEM; vid_hdr = ubi_get_vid_hdr(vidb); p = vidb->buffer; err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_alsize); if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto exit; crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); if (hdr_crc != crc) { ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x", pnum, crc, hdr_crc); ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); dump_stack(); err = -EINVAL; goto exit; } err = self_check_vid_hdr(ubi, pnum, vid_hdr); exit: ubi_free_vid_buf(vidb); return err; } /** * self_check_write - make sure write succeeded. * @ubi: UBI device description object * @buf: buffer with data which were written * @pnum: physical eraseblock number the data were written to * @offset: offset within the physical eraseblock the data were written to * @len: how many bytes were written * * This functions reads data which were recently written and compares it with * the original data buffer - the data have to match. Returns zero if the data * match and a negative error code if not or in case of failure. 
*/ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len) { int err, i; size_t read; void *buf1; loff_t addr = (loff_t)pnum * ubi->peb_size + offset; if (!ubi_dbg_chk_io(ubi)) return 0; buf1 = __vmalloc(len, GFP_NOFS); if (!buf1) { ubi_err(ubi, "cannot allocate memory to check writes"); return 0; } err = mtd_read(ubi->mtd, addr, len, &read, buf1); if (err && !mtd_is_bitflip(err)) goto out_free; for (i = 0; i < len; i++) { uint8_t c = ((uint8_t *)buf)[i]; uint8_t c1 = ((uint8_t *)buf1)[i]; int dump_len; if (c == c1) continue; ubi_err(ubi, "self-check failed for PEB %d:%d, len %d", pnum, offset, len); ubi_msg(ubi, "data differ at position %d", i); dump_len = max_t(int, 128, len - i); ubi_msg(ubi, "hex dump of the original buffer from %d to %d", i, i + dump_len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf + i, dump_len, 1); ubi_msg(ubi, "hex dump of the read buffer from %d to %d", i, i + dump_len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf1 + i, dump_len, 1); dump_stack(); err = -EINVAL; goto out_free; } vfree(buf1); return 0; out_free: vfree(buf1); return err; } /** * ubi_self_check_all_ff - check that a region of flash is empty. * @ubi: UBI device description object * @pnum: the physical eraseblock number to check * @offset: the starting offset within the physical eraseblock to check * @len: the length of the region to check * * This function returns zero if only 0xFF bytes are present at offset * @offset of the physical eraseblock @pnum, and a negative error code if not * or if an error occurred. */ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) { size_t read; int err; void *buf; loff_t addr = (loff_t)pnum * ubi->peb_size + offset; if (!ubi_dbg_chk_io(ubi)) return 0; buf = __vmalloc(len, GFP_NOFS); if (!buf) { ubi_err(ubi, "cannot allocate memory to check for 0xFFs"); return 0; } err = mtd_read(ubi->mtd, addr, len, &read, buf); if (err && !mtd_is_bitflip(err)) { ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes", err, len, pnum, offset, read); goto error; } err = ubi_check_pattern(buf, 0xFF, len); if (err == 0) { ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes", pnum, offset, len); goto fail; } vfree(buf); return 0; fail: ubi_err(ubi, "self-check failed for PEB %d", pnum); ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); err = -EINVAL; error: dump_stack(); vfree(buf); return err; } |
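As a reading aid, here is a minimal, hypothetical sketch (not part of the original source) of how a caller might consume the special return codes of ubi_io_read_vid_hdr() documented earlier; the function name example_scan_peb_vid() is invented, while ubi_alloc_vid_buf() and ubi_free_vid_buf() are the helpers used elsewhere in this file.

static int example_scan_peb_vid(struct ubi_device *ubi, int pnum)
{
	struct ubi_vid_io_buf *vidb;
	int err;

	/* Allocate the VID I/O buffer the same way self_check_peb_vid_hdr() does. */
	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0);
	switch (err) {
	case 0:
	case UBI_IO_BITFLIPS:
		/* Valid header; bitflips suggest the PEB should be scrubbed. */
		break;
	case UBI_IO_FF:
	case UBI_IO_FF_BITFLIPS:
		/* Only 0xFF bytes - no VID header is present on this PEB. */
		break;
	case UBI_IO_BAD_HDR:
	case UBI_IO_BAD_HDR_EBADMSG:
		/* Corrupted header - the PEB is a candidate for erasure. */
		break;
	default:
		/* Negative error code propagated from the MTD layer. */
		break;
	}

	ubi_free_vid_buf(vidb);
	return err;
}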
// SPDX-License-Identifier: GPL-2.0-only #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <net/busy_poll.h> #include <net/net_namespace.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/sock.h> #include <net/xdp.h> #include <net/xdp_sock.h> #include <net/page_pool/memory_provider.h> #include "dev.h" #include "devmem.h" #include "netdev-genl-gen.h" struct netdev_nl_dump_ctx { unsigned long ifindex; unsigned int rxq_idx; unsigned int txq_idx; unsigned int napi_id; }; static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb) { NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx); return (struct netdev_nl_dump_ctx *)cb->ctx; } static int netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { u64 xsk_features = 0; u64 xdp_rx_meta = 0; void *hdr; netdev_assert_locked(netdev); /* note: rtnl_lock may not be held!
*/ hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; #define XDP_METADATA_KFUNC(_, flag, __, xmo) \ if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \ xdp_rx_meta |= flag; XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC if (netdev->xsk_tx_metadata_ops) { if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp) xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; } if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES, netdev->xdp_features, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, xdp_rx_meta, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, xsk_features, NETDEV_A_DEV_PAD)) goto err_cancel_msg; if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, netdev->xdp_zc_max_segs)) goto err_cancel_msg; } genlmsg_end(rsp, hdr); return 0; err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_genl_dev_notify(struct net_device *netdev, int cmd) { struct genl_info info; struct sk_buff *ntf; if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev), NETDEV_NLGRP_MGMT)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (netdev_nl_dev_fill(netdev, ntf, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf, 0, NETDEV_NLGRP_MGMT, GFP_KERNEL); } int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_device *netdev; struct sk_buff *rsp; u32 ifindex; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_free_msg; } err = netdev_nl_dev_fill(netdev, rsp, info); netdev_unlock(netdev); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); struct net *net = sock_net(skb->sk); int err; for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); if (err < 0) return err; } return 0; } static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { unsigned long irq_suspend_timeout; unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; void *hdr; pid_t pid; if (!napi->dev->up) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) goto nla_put_failure; if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) goto nla_put_failure; if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; if (nla_put_uint(rsp, NETDEV_A_NAPI_THREADED, napi_get_threaded(napi))) goto nla_put_failure; if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) goto nla_put_failure; } napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); if (nla_put_s32(rsp, 
NETDEV_A_NAPI_DEFER_HARD_IRQS, napi_defer_hard_irqs)) goto nla_put_failure; irq_suspend_timeout = napi_get_irq_suspend_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, irq_suspend_timeout)) goto nla_put_failure; gro_flush_timeout = napi_get_gro_flush_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; struct sk_buff *rsp; u32 napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } if (err) { goto err_free_msg; } else if (!rsp->len) { err = -ENOENT; goto err_free_msg; } return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { struct napi_struct *napi; unsigned int prev_id; int err = 0; if (!netdev->up) return err; prev_id = UINT_MAX; list_for_each_entry(napi, &netdev->napi_list, dev_list) { if (!napi_id_valid(napi->napi_id)) continue; /* Dump continuation below depends on the list being sorted */ WARN_ON_ONCE(napi->napi_id >= prev_id); prev_id = napi->napi_id; if (ctx->napi_id && napi->napi_id >= ctx->napi_id) continue; err = netdev_nl_napi_fill_one(rsp, napi, info); if (err) return err; ctx->napi_id = napi->napi_id; } return err; } int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_NAPI_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->napi_id = 0; } } return err; } static int netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; u8 threaded = 0; u32 defer = 0; if (info->attrs[NETDEV_A_NAPI_THREADED]) { int ret; threaded = nla_get_uint(info->attrs[NETDEV_A_NAPI_THREADED]); ret = napi_set_threaded(napi, threaded); if (ret) return ret; } if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); } if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) { irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]); napi_set_irq_suspend_timeout(napi, irq_suspend_timeout); } if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); napi_set_gro_flush_timeout(napi, gro_flush_timeout); } return 0; } int 
netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; unsigned int napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } return err; } static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) { if (napi && napi_id_valid(napi->napi_id)) return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id); return 0; } static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct pp_memory_provider_params *params; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; params = &rxq->mp_params; if (params->mp_ops && params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, txq->napi)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (txq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; } genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, u32 q_type) { switch (q_type) { case NETDEV_QUEUE_TYPE_RX: if (q_id >= netdev->real_num_rx_queues) return -EINVAL; return 0; case NETDEV_QUEUE_TYPE_TX: if (q_id >= netdev->real_num_tx_queues) return -EINVAL; } return 0; } static int netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { int err; if (!netdev->up) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); if (err) return err; return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); } int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 q_id, q_type, ifindex; struct net_device *netdev; struct sk_buff *rsp; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX)) return -EINVAL; q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]); q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]); ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info), ifindex); if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, 
struct netdev_nl_dump_ctx *ctx) { int err = 0; if (!netdev->up) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, NETDEV_QUEUE_TYPE_RX, info); if (err) return err; } for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, NETDEV_QUEUE_TYPE_TX, info); if (err) return err; } return err; } int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->rxq_idx = 0; ctx->txq_idx = 0; } } return err; } #define NETDEV_STAT_NOT_SET (~0ULL) static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size) { const u64 *add = _add; u64 *sum = _sum; while (size) { if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET) *sum += *add; sum++; add++; size -= 8; } } static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) { if (value == NETDEV_STAT_NOT_SET) return 0; return nla_put_uint(rsp, attr_id, value); } static int netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || netdev_stat_put(rsp, 
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, u32 q_type, int i, const struct genl_info *info) { const struct netdev_stat_ops *ops = netdev->stat_ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: memset(&rx, 0xff, sizeof(rx)); ops->get_queue_stats_rx(netdev, i, &rx); if (!memchr_inv(&rx, 0xff, sizeof(rx))) goto nla_cancel; if (netdev_nl_stats_write_rx(rsp, &rx)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: memset(&tx, 0xff, sizeof(tx)); ops->get_queue_stats_tx(netdev, i, &tx); if (!memchr_inv(&tx, 0xff, sizeof(tx))) goto nla_cancel; if (netdev_nl_stats_write_tx(rsp, &tx)) goto nla_put_failure; break; } genlmsg_end(rsp, hdr); return 0; nla_cancel: genlmsg_cancel(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { const struct netdev_stat_ops *ops = netdev->stat_ops; int i, err; if (!(netdev->flags & IFF_UP)) return 0; i = ctx->rxq_idx; while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, i, info); if (err) return err; ctx->rxq_idx = ++i; } i = ctx->txq_idx; while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, i, info); if (err) return err; ctx->txq_idx = ++i; } ctx->rxq_idx = 0; ctx->txq_idx = 0; return 0; } /** * netdev_stat_queue_sum() - add up queue stats from range of queues * @netdev: net_device * @rx_start: index of the first Rx queue to query * @rx_end: index after the last Rx queue (first *not* to query) * @rx_sum: output Rx stats, should be already initialized * @tx_start: index of the first Tx queue to query * @tx_end: index after the last Tx queue (first *not* to query) * @tx_sum: output Tx stats, should be already initialized * * Add stats from [start, end) range of queue IDs to *x_sum structs. * The sum structs must be already initialized. Usually this * helper is invoked from the .get_base_stats callbacks of drivers * to account for stats of disabled queues. In that case the ranges * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues). 
*/ void netdev_stat_queue_sum(struct net_device *netdev, int rx_start, int rx_end, struct netdev_queue_stats_rx *rx_sum, int tx_start, int tx_end, struct netdev_queue_stats_tx *tx_sum) { const struct netdev_stat_ops *ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; int i; ops = netdev->stat_ops; for (i = rx_start; i < rx_end; i++) { memset(&rx, 0xff, sizeof(rx)); if (ops->get_queue_stats_rx) ops->get_queue_stats_rx(netdev, i, &rx); netdev_nl_stats_add(rx_sum, &rx, sizeof(rx)); } for (i = tx_start; i < tx_end; i++) { memset(&tx, 0xff, sizeof(tx)); if (ops->get_queue_stats_tx) ops->get_queue_stats_tx(netdev, i, &tx); netdev_nl_stats_add(tx_sum, &tx, sizeof(tx)); } } EXPORT_SYMBOL(netdev_stat_queue_sum); static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { struct netdev_queue_stats_rx rx_sum; struct netdev_queue_stats_tx tx_sum; void *hdr; /* Netdev can't guarantee any complete counters */ if (!netdev->stat_ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum))) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum, 0, netdev->real_num_tx_queues, &tx_sum); if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, struct sk_buff *skb, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { if (!netdev->stat_ops) return 0; switch (scope) { case 0: return netdev_nl_stats_by_netdev(netdev, skb, info); case NETDEV_QSTATS_SCOPE_QUEUE: return netdev_nl_stats_by_queue(netdev, skb, info, ctx); } return -EINVAL; /* Should not happen, per netlink policy */ } int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; unsigned int ifindex; unsigned int scope; int err = 0; scope = 0; if (info->attrs[NETDEV_A_QSTATS_SCOPE]) scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]); ifindex = 0; if (info->attrs[NETDEV_A_QSTATS_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); if (!netdev) { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); return -ENODEV; } if (netdev->stat_ops) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); err = -EOPNOTSUPP; } netdev_unlock_ops_compat(netdev); return err; } for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); if (err < 0) break; } return err; } int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; u32 ifindex, dmabuf_fd, 
rxq_idx; struct netdev_nl_sock *priv; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; int rem, err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); err = 0; netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_unlock_sock; } if (!netif_device_present(netdev)) err = -ENODEV; else if (!netdev_need_ops_lock(netdev)) err = -EOPNOTSUPP; if (err) { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_DEV_IFINDEX]); goto err_unlock; } binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd, priv, info->extack); if (IS_ERR(binding)) { err = PTR_ERR(binding); goto err_unlock; } nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { err = nla_parse_nested( tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr, netdev_queue_id_nl_policy, info->extack); if (err < 0) goto err_unbind; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) || NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) { err = -EINVAL; goto err_unbind; } if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]); err = -EINVAL; goto err_unbind; } rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]); err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, info->extack); if (err) goto err_unbind; } nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); err = genlmsg_reply(rsp, info); if (err) goto err_unbind; netdev_unlock(netdev); mutex_unlock(&priv->lock); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info) { struct net_devmem_dmabuf_binding *binding; struct netdev_nl_sock *priv; struct net_device *netdev; u32 ifindex, dmabuf_fd; struct sk_buff *rsp; int err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_unlock_sock; } if (!netif_device_present(netdev)) { err = -ENODEV; goto err_unlock_netdev; } if (!netdev->netmem_tx) { err = -EOPNOTSUPP; NL_SET_ERR_MSG(info->extack, "Driver does not support netmem TX"); goto err_unlock_netdev; } binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv, info->extack); if (IS_ERR(binding)) { err = 
PTR_ERR(binding); goto err_unlock_netdev; } nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); netdev_unlock(netdev); mutex_unlock(&priv->lock); return genlmsg_reply(rsp, info); err_unlock_netdev: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) { INIT_LIST_HEAD(&priv->bindings); mutex_init(&priv->lock); } void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv) { struct net_devmem_dmabuf_binding *binding; struct net_devmem_dmabuf_binding *temp; netdevice_tracker dev_tracker; struct net_device *dev; mutex_lock(&priv->lock); list_for_each_entry_safe(binding, temp, &priv->bindings, list) { mutex_lock(&binding->lock); dev = binding->dev; if (!dev) { mutex_unlock(&binding->lock); net_devmem_unbind_dmabuf(binding); continue; } netdev_hold(dev, &dev_tracker, GFP_KERNEL); mutex_unlock(&binding->lock); netdev_lock(dev); net_devmem_unbind_dmabuf(binding); netdev_unlock(dev); netdev_put(dev, &dev_tracker); } mutex_unlock(&priv->lock); } static int netdev_genl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: netdev_lock_ops_to_full(netdev); netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); netdev_unlock_full_to_ops(netdev); break; case NETDEV_UNREGISTER: netdev_lock(netdev); netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); netdev_unlock(netdev); break; case NETDEV_XDP_FEAT_CHANGE: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); break; } return NOTIFY_OK; } static struct notifier_block netdev_genl_nb = { .notifier_call = netdev_genl_netdevice_event, }; static int __init netdev_genl_init(void) { int err; err = register_netdevice_notifier(&netdev_genl_nb); if (err) return err; err = genl_register_family(&netdev_nl_family); if (err) goto err_unreg_ntf; return 0; err_unreg_ntf: unregister_netdevice_notifier(&netdev_genl_nb); return err; } subsys_initcall(netdev_genl_init); |
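Before moving on, a minimal sketch under stated assumptions: the netdev_stat_queue_sum() kernel-doc above says drivers usually invoke it from their .get_base_stats callback to account for stats of disabled queues. The hypothetical foo_get_base_stats() below illustrates that pattern; the foo_ name and the choice of which counters to zero are assumptions, not taken from any real driver.

static void foo_get_base_stats(struct net_device *netdev,
			       struct netdev_queue_stats_rx *rx,
			       struct netdev_queue_stats_tx *tx)
{
	/* Report zero for the counters this hypothetical driver maintains;
	 * fields left at ~0 are treated as "not set" by the netlink code.
	 */
	rx->packets = 0;
	rx->bytes = 0;
	tx->packets = 0;
	tx->bytes = 0;

	/* Fold in the queues beyond real_num_*_queues, which are currently
	 * disabled and therefore not visited by the per-queue callbacks.
	 */
	netdev_stat_queue_sum(netdev,
			      netdev->real_num_rx_queues, netdev->num_rx_queues, rx,
			      netdev->real_num_tx_queues, netdev->num_tx_queues, tx);
}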
// SPDX-License-Identifier: GPL-2.0 #include <linux/compiler.h> #include <linux/export.h> #include <linux/fault-inject-usercopy.h> #include <linux/kasan-checks.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <asm/word-at-a-time.h> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define IS_UNALIGNED(src, dst) 0 #else #define IS_UNALIGNED(src, dst) \ (((long) dst | (long) src) & (sizeof(long) - 1)) #endif /* * Do a strncpy, return length of string without final '\0'. * 'count' is the user-supplied count (return 'count' if we * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ static __always_inline long do_strncpy_from_user(char *dst, const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; while (max >= sizeof(unsigned long)) { unsigned long c, data, mask; /* Fall back to byte-at-a-time if we get a page fault */ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); /* * Note that we mask out the bytes following the NUL. This is * important to do because string oblivious code may read past * the NUL. For those routines, we don't want to give them * potentially random bytes after the NUL in `src`. * * One example of such code is BPF map keys. BPF treats map keys * as an opaque set of bytes. Without the post-NUL mask, any BPF * maps keyed by strings returned from strncpy_from_user() may * have multiple entries for semantically identical strings. */ if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); mask = zero_bytemask(data); *(unsigned long *)(dst+res) = c & mask; return res + find_zero(data); } *(unsigned long *)(dst+res) = c; res += sizeof(unsigned long); max -= sizeof(unsigned long); } byte_at_a_time: while (max) { char c; unsafe_get_user(c,src+res, efault); dst[res] = c; if (!c) return res; res++; max--; } /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, that's ok - we got as much as the user asked for. */ if (res >= count) return res; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's an EFAULT. */ efault: return -EFAULT; } /** * strncpy_from_user: - Copy a NUL terminated string from userspace. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from userspace to kernel space. * * On success, returns the length of the string (not including the trailing * NUL). * * If access to userspace fails, returns -EFAULT (some data may have been * copied).
* * If @count is smaller than the length of the string, copies @count bytes * and returns @count. */ long strncpy_from_user(char *dst, const char __user *src, long count) { unsigned long max_addr, src_addr; might_fault(); if (should_fail_usercopy()) return -EFAULT; if (unlikely(count <= 0)) return 0; kasan_check_write(dst, count); check_object_size(dst, count, false); if (can_do_masked_user_access()) { long retval; src = masked_user_access_begin(src); retval = do_strncpy_from_user(dst, src, count, count); user_read_access_end(); return retval; } max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(src); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; long retval; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; if (user_read_access_begin(src, max)) { retval = do_strncpy_from_user(dst, src, count, max); user_read_access_end(); return retval; } } return -EFAULT; } EXPORT_SYMBOL(strncpy_from_user); |
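A hedged usage sketch (not part of the original file): one way a caller might interpret strncpy_from_user()'s return value according to the kernel-doc above. The wrapper name and the -ENAMETOOLONG truncation policy are assumptions for illustration only.

static long example_get_user_name(char *dst, long dst_size,
				  const char __user *uname)
{
	long len;

	len = strncpy_from_user(dst, uname, dst_size);
	if (len < 0)
		return len;		/* -EFAULT: faulted while reading userspace */
	if (len == dst_size)
		return -ENAMETOOLONG;	/* hit the limit: dst is not NUL-terminated */
	return len;			/* length of the string, excluding the NUL */
}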
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_H #define _LINUX_TTY_H #include <linux/fs.h> #include <linux/major.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/tty_driver.h> #include <linux/tty_ldisc.h> #include <linux/tty_port.h> #include <linux/mutex.h> #include <linux/tty_flags.h> #include <uapi/linux/tty.h> #include <linux/rwsem.h> #include <linux/llist.h> /* * (Note: the *_driver.minor_start values 1, 64, 128, 192 are * hardcoded at present.)
*/ #define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ #define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */ #define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ /* * This character is the same as _POSIX_VDISABLE: it cannot be used as * a c_cc[] character, but indicates that a particular special character * isn't in use (eg VINTR has no character etc) */ #define __DISABLED_CHAR '\0' #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) #define KILL_CHAR(tty) ((tty)->termios.c_cc[VKILL]) #define EOF_CHAR(tty) ((tty)->termios.c_cc[VEOF]) #define TIME_CHAR(tty) ((tty)->termios.c_cc[VTIME]) #define MIN_CHAR(tty) ((tty)->termios.c_cc[VMIN]) #define SWTC_CHAR(tty) ((tty)->termios.c_cc[VSWTC]) #define START_CHAR(tty) ((tty)->termios.c_cc[VSTART]) #define STOP_CHAR(tty) ((tty)->termios.c_cc[VSTOP]) #define SUSP_CHAR(tty) ((tty)->termios.c_cc[VSUSP]) #define EOL_CHAR(tty) ((tty)->termios.c_cc[VEOL]) #define REPRINT_CHAR(tty) ((tty)->termios.c_cc[VREPRINT]) #define DISCARD_CHAR(tty) ((tty)->termios.c_cc[VDISCARD]) #define WERASE_CHAR(tty) ((tty)->termios.c_cc[VWERASE]) #define LNEXT_CHAR(tty) ((tty)->termios.c_cc[VLNEXT]) #define EOL2_CHAR(tty) ((tty)->termios.c_cc[VEOL2]) #define _I_FLAG(tty, f) ((tty)->termios.c_iflag & (f)) #define _O_FLAG(tty, f) ((tty)->termios.c_oflag & (f)) #define _C_FLAG(tty, f) ((tty)->termios.c_cflag & (f)) #define _L_FLAG(tty, f) ((tty)->termios.c_lflag & (f)) #define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK) #define I_BRKINT(tty) _I_FLAG((tty), BRKINT) #define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR) #define I_PARMRK(tty) _I_FLAG((tty), PARMRK) #define I_INPCK(tty) _I_FLAG((tty), INPCK) #define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP) #define I_INLCR(tty) _I_FLAG((tty), INLCR) #define I_IGNCR(tty) _I_FLAG((tty), IGNCR) #define I_ICRNL(tty) _I_FLAG((tty), ICRNL) #define I_IUCLC(tty) _I_FLAG((tty), IUCLC) #define I_IXON(tty) _I_FLAG((tty), IXON) #define I_IXANY(tty) _I_FLAG((tty), IXANY) #define I_IXOFF(tty) _I_FLAG((tty), IXOFF) #define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL) #define I_IUTF8(tty) _I_FLAG((tty), IUTF8) #define O_OPOST(tty) _O_FLAG((tty), OPOST) #define O_OLCUC(tty) _O_FLAG((tty), OLCUC) #define O_ONLCR(tty) _O_FLAG((tty), ONLCR) #define O_OCRNL(tty) _O_FLAG((tty), OCRNL) #define O_ONOCR(tty) _O_FLAG((tty), ONOCR) #define O_ONLRET(tty) _O_FLAG((tty), ONLRET) #define O_OFILL(tty) _O_FLAG((tty), OFILL) #define O_OFDEL(tty) _O_FLAG((tty), OFDEL) #define O_NLDLY(tty) _O_FLAG((tty), NLDLY) #define O_CRDLY(tty) _O_FLAG((tty), CRDLY) #define O_TABDLY(tty) _O_FLAG((tty), TABDLY) #define O_BSDLY(tty) _O_FLAG((tty), BSDLY) #define O_VTDLY(tty) _O_FLAG((tty), VTDLY) #define O_FFDLY(tty) _O_FLAG((tty), FFDLY) #define C_BAUD(tty) _C_FLAG((tty), CBAUD) #define C_CSIZE(tty) _C_FLAG((tty), CSIZE) #define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB) #define C_CREAD(tty) _C_FLAG((tty), CREAD) #define C_PARENB(tty) _C_FLAG((tty), PARENB) #define C_PARODD(tty) _C_FLAG((tty), PARODD) #define C_HUPCL(tty) _C_FLAG((tty), HUPCL) #define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL) #define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD) #define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS) #define C_CMSPAR(tty) _C_FLAG((tty), CMSPAR) #define L_ISIG(tty) _L_FLAG((tty), ISIG) #define L_ICANON(tty) _L_FLAG((tty), ICANON) #define L_XCASE(tty) _L_FLAG((tty), XCASE) #define L_ECHO(tty) _L_FLAG((tty), ECHO) #define L_ECHOE(tty) _L_FLAG((tty), ECHOE) #define L_ECHOK(tty) 
_L_FLAG((tty), ECHOK) #define L_ECHONL(tty) _L_FLAG((tty), ECHONL) #define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH) #define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP) #define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL) #define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT) #define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE) #define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) #define L_PENDIN(tty) _L_FLAG((tty), PENDIN) #define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) #define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC) struct device; struct signal_struct; struct tty_operations; /** * struct tty_struct - state associated with a tty while open * * @kref: reference counting by tty_kref_get() and tty_kref_put(), reaching zero * frees the structure * @dev: class device or %NULL (e.g. ptys, serdev) * @driver: &struct tty_driver operating this tty * @ops: &struct tty_operations of @driver for this tty (open, close, etc.) * @index: index of this tty (e.g. to construct @name like tty12) * @ldisc_sem: protects line discipline changes (@ldisc) -- lock tty not pty * @ldisc: the current line discipline for this tty (n_tty by default) * @atomic_write_lock: protects against concurrent writers, i.e. locks * @write_cnt, @write_buf and similar * @legacy_mutex: leftover from history (BKL -> BTM -> @legacy_mutex), * protecting several operations on this tty * @throttle_mutex: protects against concurrent tty_throttle_safe() and * tty_unthrottle_safe() (but not tty_unthrottle()) * @termios_rwsem: protects @termios and @termios_locked * @winsize_mutex: protects @winsize * @termios: termios for the current tty, copied from/to @driver.termios * @termios_locked: locked termios (by %TIOCGLCKTRMIOS and %TIOCSLCKTRMIOS * ioctls) * @name: name of the tty constructed by tty_line_name() (e.g. ttyS3) * @flags: bitwise OR of %TTY_THROTTLED, %TTY_IO_ERROR, ... * @count: count of open processes, reaching zero cancels all the work for * this tty and drops a @kref too (but does not free this tty) * @winsize: size of the terminal "window" (cf. @winsize_mutex) * @flow: flow settings grouped together * @flow.lock: lock for @flow members * @flow.stopped: tty stopped/started by stop_tty()/start_tty() * @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has * precedence over @flow.stopped) * @ctrl: control settings grouped together * @ctrl.lock: lock for @ctrl members * @ctrl.pgrp: process group of this tty (setpgrp(2)) * @ctrl.session: session of this tty (setsid(2)). Writes are protected by both * @ctrl.lock and @legacy_mutex, readers must use at least one of * them. * @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants) * @ctrl.packet: packet mode enabled * @hw_stopped: not controlled by the tty layer, under @driver's control for CTS * handling * @receive_room: bytes permitted to feed to @ldisc without any being lost * @flow_change: controls behavior of throttling, see tty_throttle_safe() and * tty_unthrottle_safe() * @link: link to another pty (master -> slave and vice versa) * @fasync: state for %O_ASYNC (for %SIGIO); managed by fasync_helper() * @write_wait: concurrent writers are waiting in this queue until they are * allowed to write * @read_wait: readers wait for data in this queue * @hangup_work: normally a work to perform a hangup (do_tty_hangup()); while * freeing the tty, (re)used to release_one_tty() * @disc_data: pointer to @ldisc's private data (e.g. to &struct n_tty_data) * @driver_data: pointer to @driver's private data (e.g. 
&struct uart_state) * @files_lock: protects @tty_files list * @tty_files: list of (re)openers of this tty (i.e. linked &struct * tty_file_private) * @closing: when set during close, n_tty processes only START & STOP chars * @write_buf: temporary buffer used during tty_write() to copy user data to * @write_cnt: count of bytes written in tty_write() to @write_buf * @SAK_work: if the tty has a pending do_SAK, it is queued here * @port: persistent storage for this device (i.e. &struct tty_port) * * All of the state associated with a tty while the tty is open. Persistent * storage for tty devices is referenced here as @port and is documented in * &struct tty_port. */ struct tty_struct { struct kref kref; int index; struct device *dev; struct tty_driver *driver; struct tty_port *port; const struct tty_operations *ops; struct tty_ldisc *ldisc; struct ld_semaphore ldisc_sem; struct mutex atomic_write_lock; struct mutex legacy_mutex; struct mutex throttle_mutex; struct rw_semaphore termios_rwsem; struct mutex winsize_mutex; struct ktermios termios, termios_locked; char name[64]; unsigned long flags; int count; unsigned int receive_room; struct winsize winsize; struct { spinlock_t lock; bool stopped; bool tco_stopped; } flow; struct { struct pid *pgrp; struct pid *session; spinlock_t lock; unsigned char pktstatus; bool packet; } ctrl; bool hw_stopped; bool closing; int flow_change; struct tty_struct *link; struct fasync_struct *fasync; wait_queue_head_t write_wait; wait_queue_head_t read_wait; struct work_struct hangup_work; void *disc_data; void *driver_data; spinlock_t files_lock; int write_cnt; u8 *write_buf; struct list_head tty_files; struct work_struct SAK_work; } __randomize_layout; /* Each of a tty's open files has private_data pointing to tty_file_private */ struct tty_file_private { struct tty_struct *tty; struct file *file; struct list_head list; }; /** * enum tty_struct_flags - TTY Struct Flags * * These bits are used in the :c:member:`tty_struct.flags` field. * * So that interrupts won't be able to mess up the queues, * copy_to_cooked must be atomic with respect to itself, as must * tty->write. Thus, you must use the inline functions set_bit() and * clear_bit() to make things atomic. * * @TTY_THROTTLED: * Driver input is throttled. The ldisc should call * :c:member:`tty_driver.unthrottle()` in order to resume reception when * it is ready to process more data (at threshold min). * * @TTY_IO_ERROR: * If set, causes all subsequent userspace read/write calls on the tty to * fail, returning -%EIO. (May be no ldisc too.) * * @TTY_OTHER_CLOSED: * Device is a pty and the other side has closed. * * @TTY_EXCLUSIVE: * Exclusive open mode (a single opener). * * @TTY_DO_WRITE_WAKEUP: * If set, causes the driver to call the * :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume * transmission when it can accept more data to transmit. * * @TTY_LDISC_OPEN: * Indicates that a line discipline is open. For debugging purposes only. * * @TTY_PTY_LOCK: * A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic. * * @TTY_NO_WRITE_SPLIT: * Prevent driver from splitting up writes into smaller chunks (preserve * write boundaries to driver). * * @TTY_HUPPED: * The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`. * * @TTY_HUPPING: * The TTY is in the process of hanging up to abort potential readers. * * @TTY_LDISC_CHANGING: * Line discipline for this TTY is being changed. I/O should not block * when this is set. Use tty_io_nonblock() to check. 
* * @TTY_LDISC_HALTED: * Line discipline for this TTY was stopped. No work should be queued to * this ldisc. */ enum tty_struct_flags { TTY_THROTTLED, TTY_IO_ERROR, TTY_OTHER_CLOSED, TTY_EXCLUSIVE, TTY_DO_WRITE_WAKEUP, TTY_LDISC_OPEN, TTY_PTY_LOCK, TTY_NO_WRITE_SPLIT, TTY_HUPPED, TTY_HUPPING, TTY_LDISC_CHANGING, TTY_LDISC_HALTED, }; static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file) { return file->f_flags & O_NONBLOCK || test_bit(TTY_LDISC_CHANGING, &tty->flags); } static inline bool tty_io_error(struct tty_struct *tty) { return test_bit(TTY_IO_ERROR, &tty->flags); } static inline bool tty_throttled(struct tty_struct *tty) { return test_bit(TTY_THROTTLED, &tty->flags); } #ifdef CONFIG_TTY void tty_kref_put(struct tty_struct *tty); struct pid *tty_get_pgrp(struct tty_struct *tty); void tty_vhangup_self(void); void disassociate_ctty(int priv); dev_t tty_devnum(struct tty_struct *tty); void proc_clear_tty(struct task_struct *p); struct tty_struct *get_current_tty(void); /* tty_io.c */ int __init tty_init(void); const char *tty_name(const struct tty_struct *tty); struct tty_struct *tty_kopen_exclusive(dev_t device); struct tty_struct *tty_kopen_shared(dev_t device); void tty_kclose(struct tty_struct *tty); int tty_dev_name_to_number(const char *name, dev_t *number); #else static inline void tty_kref_put(struct tty_struct *tty) { } static inline struct pid *tty_get_pgrp(struct tty_struct *tty) { return NULL; } static inline void tty_vhangup_self(void) { } static inline void disassociate_ctty(int priv) { } static inline dev_t tty_devnum(struct tty_struct *tty) { return 0; } static inline void proc_clear_tty(struct task_struct *p) { } static inline struct tty_struct *get_current_tty(void) { return NULL; } /* tty_io.c */ static inline int __init tty_init(void) { return 0; } static inline const char *tty_name(const struct tty_struct *tty) { return "(none)"; } static inline struct tty_struct *tty_kopen_exclusive(dev_t device) { return ERR_PTR(-ENODEV); } static inline void tty_kclose(struct tty_struct *tty) { } static inline int tty_dev_name_to_number(const char *name, dev_t *number) { return -ENOTSUPP; } #endif extern struct ktermios tty_std_termios; int vcs_init(void); extern const struct class tty_class; /** * tty_kref_get - get a tty reference * @tty: tty device * * Returns: a new reference to a tty object * * Locking: The caller must hold sufficient locks/counts to ensure that their * existing reference cannot go away. 
*/ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) { if (tty) kref_get(&tty->kref); return tty; } const char *tty_driver_name(const struct tty_struct *tty); void tty_wait_until_sent(struct tty_struct *tty, long timeout); void stop_tty(struct tty_struct *tty); void start_tty(struct tty_struct *tty); void tty_write_message(struct tty_struct *tty, char *msg); int tty_send_xchar(struct tty_struct *tty, u8 ch); int tty_put_char(struct tty_struct *tty, u8 c); unsigned int tty_chars_in_buffer(struct tty_struct *tty); unsigned int tty_write_room(struct tty_struct *tty); void tty_driver_flush_buffer(struct tty_struct *tty); void tty_unthrottle(struct tty_struct *tty); bool tty_throttle_safe(struct tty_struct *tty); bool tty_unthrottle_safe(struct tty_struct *tty); int tty_do_resize(struct tty_struct *tty, struct winsize *ws); int tty_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); int tty_get_tiocm(struct tty_struct *tty); int is_current_pgrp_orphaned(void); void tty_hangup(struct tty_struct *tty); void tty_vhangup(struct tty_struct *tty); int tty_hung_up_p(struct file *filp); void do_SAK(struct tty_struct *tty); void __do_SAK(struct tty_struct *tty); void no_tty(void); speed_t tty_termios_baud_rate(const struct ktermios *termios); void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, speed_t obaud); void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, speed_t obaud); /** * tty_get_baud_rate - get tty bit rates * @tty: tty to query * * Returns: the baud rate as an integer for this terminal * * Locking: The termios lock must be held by the caller. */ static inline speed_t tty_get_baud_rate(const struct tty_struct *tty) { return tty_termios_baud_rate(&tty->termios); } unsigned char tty_get_char_size(unsigned int cflag); unsigned char tty_get_frame_size(unsigned int cflag); void tty_termios_copy_hw(struct ktermios *new, const struct ktermios *old); bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); void tty_wakeup(struct tty_struct *tty); int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); int tty_perform_flush(struct tty_struct *tty, unsigned long arg); struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); void tty_release_struct(struct tty_struct *tty, int idx); void tty_init_termios(struct tty_struct *tty); void tty_save_termios(struct tty_struct *tty); int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); extern struct mutex tty_mutex; /* n_tty.c */ void n_tty_inherit_ops(struct tty_ldisc_ops *ops); #ifdef CONFIG_TTY void __init n_tty_init(void); #else static inline void n_tty_init(void) { } #endif /* tty_audit.c */ #ifdef CONFIG_AUDIT void tty_audit_exit(void); void tty_audit_fork(struct signal_struct *sig); int tty_audit_push(void); #else static inline void tty_audit_exit(void) { } static inline void tty_audit_fork(struct signal_struct *sig) { } static inline int tty_audit_push(void) { return 0; } #endif /* tty_ioctl.c */ int n_tty_ioctl_helper(struct tty_struct *tty, unsigned int cmd, unsigned long arg); /* vt.c */ int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); /* tty_mutex.c */ /* functions for preparation of BKL removal */ void tty_lock(struct tty_struct *tty); int tty_lock_interruptible(struct tty_struct *tty); void tty_unlock(struct 
tty_struct *tty); void tty_lock_slave(struct tty_struct *tty); void tty_unlock_slave(struct tty_struct *tty); void tty_set_lock_subclass(struct tty_struct *tty); #endif
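The tty_kref_get()/tty_kref_put() pair above is plain kref-style reference counting on the tty object. A minimal sketch of the usual pattern (the helper name is made up for illustration, and it assumes the caller hands in a pointer that is valid at the time of the call):

static void use_tty_later(struct tty_struct *tty)
{
        /* Take our own counted reference before touching the tty. */
        tty = tty_kref_get(tty);
        if (!tty)
                return;

        /* Safe to operate on the tty while the reference is held. */
        tty_wakeup(tty);

        /* Balance the tty_kref_get() above. */
        tty_kref_put(tty);
}

tty_kref_get() accepts and returns NULL, so callers do not need a separate NULL check before taking the reference.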
// SPDX-License-Identifier: GPL-2.0-only /* fs/fat/nfs.c */ #include <linux/exportfs.h> #include "fat.h" struct fat_fid { u32 i_gen; u32 i_pos_low; u16 i_pos_hi; u16 parent_i_pos_hi; u32 parent_i_pos_low; u32 parent_i_gen; }; #define FAT_FID_SIZE_WITHOUT_PARENT 3 #define FAT_FID_SIZE_WITH_PARENT (sizeof(struct fat_fid)/sizeof(u32)) /* * Look up a directory inode given its starting cluster. */ static struct inode *fat_dget(struct super_block *sb, int i_logstart) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct hlist_head *head; struct msdos_inode_info *i; struct inode *inode = NULL; head = sbi->dir_hashtable + fat_dir_hash(i_logstart); spin_lock(&sbi->dir_hash_lock); hlist_for_each_entry(i, head, i_dir_hash) { BUG_ON(i->vfs_inode.i_sb != sb); if (i->i_logstart != i_logstart) continue; inode = igrab(&i->vfs_inode); if (inode) break; } spin_unlock(&sbi->dir_hash_lock); return inode; } static struct inode *fat_ilookup(struct super_block *sb, u64 ino, loff_t i_pos) { if (MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) return fat_iget(sb, i_pos); else { if ((ino < MSDOS_ROOT_INO) || (ino == MSDOS_FSINFO_INO)) return NULL; return ilookup(sb, ino); } } static struct inode *__fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation, loff_t i_pos) { struct inode *inode = fat_ilookup(sb, ino, i_pos); if (inode && generation && (inode->i_generation != generation)) { iput(inode); inode = NULL; } if (inode == NULL && MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) { struct buffer_head *bh = NULL; struct msdos_dir_entry *de ; sector_t blocknr; int offset; fat_get_blknr_offset(MSDOS_SB(sb), i_pos, &blocknr, &offset); bh = sb_bread(sb, blocknr); if (!bh) { fat_msg(sb, KERN_ERR, "unable to read block(%llu) for building NFS inode", (llu)blocknr); return inode; } de = (struct msdos_dir_entry *)bh->b_data; /* If a file is deleted on server and client is not updated * yet, we must not build the inode upon a lookup call.
*/ if (IS_FREE(de[offset].name)) inode = NULL; else inode = fat_build_inode(sb, &de[offset], i_pos); brelse(bh); } return inode; } static struct inode *fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { return __fat_nfs_get_inode(sb, ino, generation, 0); } static int fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { int len = *lenp; struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); struct fat_fid *fid = (struct fat_fid *) fh; loff_t i_pos; int type = FILEID_FAT_WITHOUT_PARENT; if (parent) { if (len < FAT_FID_SIZE_WITH_PARENT) { *lenp = FAT_FID_SIZE_WITH_PARENT; return FILEID_INVALID; } } else { if (len < FAT_FID_SIZE_WITHOUT_PARENT) { *lenp = FAT_FID_SIZE_WITHOUT_PARENT; return FILEID_INVALID; } } i_pos = fat_i_pos_read(sbi, inode); *lenp = FAT_FID_SIZE_WITHOUT_PARENT; fid->i_gen = inode->i_generation; fid->i_pos_low = i_pos & 0xFFFFFFFF; fid->i_pos_hi = (i_pos >> 32) & 0xFFFF; if (parent) { i_pos = fat_i_pos_read(sbi, parent); fid->parent_i_pos_hi = (i_pos >> 32) & 0xFFFF; fid->parent_i_pos_low = i_pos & 0xFFFFFFFF; fid->parent_i_gen = parent->i_generation; type = FILEID_FAT_WITH_PARENT; *lenp = FAT_FID_SIZE_WITH_PARENT; } else { /* * We need to initialize this field because the fh is actually * 12 bytes long */ fid->parent_i_pos_hi = 0; } return type; } /* * Map a NFS file handle to a corresponding dentry. * The dentry may or may not be connected to the filesystem root. */ static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_dentry_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; switch (fh_type) { case FILEID_FAT_WITHOUT_PARENT: if (fh_len < FAT_FID_SIZE_WITHOUT_PARENT) return NULL; break; case FILEID_FAT_WITH_PARENT: if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; break; default: return NULL; } i_pos = fid->i_pos_hi; i_pos = (i_pos << 32) | (fid->i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->i_gen, i_pos); return d_obtain_alias(inode); } /* * Find the parent for a file specified by NFS handle. * This requires that the handle contain the i_ino of the parent. 
*/ static struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_parent_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; switch (fh_type) { case FILEID_FAT_WITH_PARENT: i_pos = fid->parent_i_pos_hi; i_pos = (i_pos << 32) | (fid->parent_i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->parent_i_gen, i_pos); break; } return d_obtain_alias(inode); } /* * Rebuild the parent for a directory that is not connected * to the filesystem root */ static struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart) { int search_clus, clus_to_match; struct msdos_dir_entry *de; struct inode *parent = NULL; struct inode *dummy_grand_parent = NULL; struct fat_slot_info sinfo; struct msdos_sb_info *sbi = MSDOS_SB(sb); sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); struct buffer_head *parent_bh = sb_bread(sb, blknr); if (!parent_bh) { fat_msg(sb, KERN_ERR, "unable to read cluster of parent directory"); return NULL; } de = (struct msdos_dir_entry *) parent_bh->b_data; clus_to_match = fat_get_start(sbi, &de[0]); search_clus = fat_get_start(sbi, &de[1]); dummy_grand_parent = fat_dget(sb, search_clus); if (!dummy_grand_parent) { dummy_grand_parent = new_inode(sb); if (!dummy_grand_parent) { brelse(parent_bh); return parent; } dummy_grand_parent->i_ino = iunique(sb, MSDOS_ROOT_INO); fat_fill_inode(dummy_grand_parent, &de[1]); MSDOS_I(dummy_grand_parent)->i_pos = -1; } if (!fat_scan_logstart(dummy_grand_parent, clus_to_match, &sinfo)) parent = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(parent_bh); iput(dummy_grand_parent); return parent; } /* * Find the parent for a directory that is not currently connected to * the filesystem root. * * On entry, the caller holds d_inode(child_dir)->i_mutex. */ static struct dentry *fat_get_parent(struct dentry *child_dir) { struct super_block *sb = child_dir->d_sb; struct buffer_head *bh = NULL; struct msdos_dir_entry *de; struct inode *parent_inode = NULL; struct msdos_sb_info *sbi = MSDOS_SB(sb); if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) { int parent_logstart = fat_get_start(sbi, de); parent_inode = fat_dget(sb, parent_logstart); if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO) parent_inode = fat_rebuild_parent(sb, parent_logstart); } brelse(bh); return d_obtain_alias(parent_inode); } const struct export_operations fat_export_ops = { .encode_fh = generic_encode_ino32_fh, .fh_to_dentry = fat_fh_to_dentry, .fh_to_parent = fat_fh_to_parent, .get_parent = fat_get_parent, }; const struct export_operations fat_export_ops_nostale = { .encode_fh = fat_encode_fh_nostale, .fh_to_dentry = fat_fh_to_dentry_nostale, .fh_to_parent = fat_fh_to_parent_nostale, .get_parent = fat_get_parent, }; |
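The nostale file handles above squeeze a 64-bit directory-entry position into 48 bits of the handle: the low 32 bits go into @i_pos_low and the next 16 bits into @i_pos_hi, and fat_fh_to_dentry_nostale() reassembles them the same way. A standalone sketch of just that packing (plain stdint types, not kernel code; it assumes i_pos fits in 48 bits, which is all the handle format can carry):

#include <assert.h>
#include <stdint.h>

struct fid_pos {
        uint32_t i_pos_low;     /* low 32 bits of i_pos */
        uint16_t i_pos_hi;      /* next 16 bits of i_pos */
};

static struct fid_pos pack_i_pos(uint64_t i_pos)
{
        struct fid_pos f = {
                .i_pos_low = i_pos & 0xFFFFFFFF,
                .i_pos_hi  = (i_pos >> 32) & 0xFFFF,
        };
        return f;
}

static uint64_t unpack_i_pos(struct fid_pos f)
{
        return ((uint64_t)f.i_pos_hi << 32) | f.i_pos_low;
}

int main(void)
{
        uint64_t i_pos = 0x1234ABCD5678ULL;     /* arbitrary 48-bit position */

        assert(unpack_i_pos(pack_i_pos(i_pos)) == i_pos);
        return 0;
}

This is also why FAT_FID_SIZE_WITHOUT_PARENT is 3: the generation plus the 48-bit position occupy three 32-bit words, i.e. the 12 bytes mentioned in the comment in fat_encode_fh_nostale().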
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_KEYLIST_H
#define _BCACHEFS_KEYLIST_H

#include "keylist_types.h"

int bch2_keylist_realloc(struct keylist *, u64 *, size_t, size_t);
void bch2_keylist_pop_front(struct keylist *);

static inline void bch2_keylist_init(struct keylist *l, u64 *inline_keys)
{
	l->top_p = l->keys_p = inline_keys;
}

static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys)
{
	if (l->keys_p != inline_keys)
		kfree(l->keys_p);
}

static inline void bch2_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}

static inline void bch2_keylist_add(struct keylist *l, const struct bkey_i *k)
{
	bkey_copy(l->top, k);
	bch2_keylist_push(l);
}

static inline bool bch2_keylist_empty(struct keylist *l)
{
	return l->top == l->keys;
}

static inline size_t bch2_keylist_u64s(struct keylist *l)
{
	return l->top_p - l->keys_p;
}

static inline size_t bch2_keylist_bytes(struct keylist *l)
{
	return bch2_keylist_u64s(l) * sizeof(u64);
}

static inline struct bkey_i *bch2_keylist_front(struct keylist *l)
{
	return l->keys;
}

#define for_each_keylist_key(_keylist, _k)			\
	for (struct bkey_i *_k = (_keylist)->keys;		\
	     _k != (_keylist)->top;				\
	     _k = bkey_next(_k))

static inline u64 keylist_sectors(struct keylist *keys)
{
	u64 ret = 0;

	for_each_keylist_key(keys, k)
		ret += k->k.size;

	return ret;
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_verify_keylist_sorted(struct keylist *);
#else
static inline void bch2_verify_keylist_sorted(struct keylist *l) {}
#endif

#endif /* _BCACHEFS_KEYLIST_H */
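A usage sketch for the helpers above. It relies on bcachefs-internal types, so it is not standalone; the function name and the fixed-size scratch buffer are illustrative, and a real caller would use bch2_keylist_realloc() to guarantee room before adding keys:

static void example_keylist_use(struct bkey_i **insert_keys, unsigned nr)
{
        u64 inline_keys[64];            /* assumed large enough for the demo */
        struct keylist keys;
        unsigned i;

        bch2_keylist_init(&keys, inline_keys);

        /* Each add copies the key to the top of the list and advances it. */
        for (i = 0; i < nr; i++)
                bch2_keylist_add(&keys, insert_keys[i]);

        /* Consume in FIFO order. */
        while (!bch2_keylist_empty(&keys)) {
                struct bkey_i *k = bch2_keylist_front(&keys);

                /* ... hand k to the btree insert path here ... */
                bch2_keylist_pop_front(&keys);
        }

        bch2_keylist_free(&keys, inline_keys);
}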
// SPDX-License-Identifier: GPL-2.0 /* * Framework for userspace DMA-BUF allocations * * Copyright (C) 2011 Google, Inc. * Copyright (C) 2019 Linaro Ltd. */ #include <linux/cdev.h> #include <linux/device.h> #include <linux/dma-buf.h> #include <linux/dma-heap.h> #include <linux/err.h> #include <linux/list.h> #include <linux/nospec.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/xarray.h> #include <uapi/linux/dma-heap.h> #define DEVNAME "dma_heap" #define NUM_HEAP_MINORS 128 /** * struct dma_heap - represents a dmabuf heap in the system * @name: used for debugging/device-node name * @ops: ops struct for this heap * @priv: private data for this heap * @heap_devt: heap device node * @list: list head connecting to list of heaps * @heap_cdev: heap char device * * Represents a heap of memory from which buffers can be made. */ struct dma_heap { const char *name; const struct dma_heap_ops *ops; void *priv; dev_t heap_devt; struct list_head list; struct cdev heap_cdev; }; static LIST_HEAD(heap_list); static DEFINE_MUTEX(heap_list_lock); static dev_t dma_heap_devt; static struct class *dma_heap_class; static DEFINE_XARRAY_ALLOC(dma_heap_minors); static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len, u32 fd_flags, u64 heap_flags) { struct dma_buf *dmabuf; int fd; /* * Allocations from all heaps have to begin * and end on page boundaries.
*/ len = PAGE_ALIGN(len); if (!len) return -EINVAL; dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); fd = dma_buf_fd(dmabuf, fd_flags); if (fd < 0) { dma_buf_put(dmabuf); /* just return, as put will call release and that will free */ } return fd; } static int dma_heap_open(struct inode *inode, struct file *file) { struct dma_heap *heap; heap = xa_load(&dma_heap_minors, iminor(inode)); if (!heap) { pr_err("dma_heap: minor %d unknown.\n", iminor(inode)); return -ENODEV; } /* instance data as context */ file->private_data = heap; nonseekable_open(inode, file); return 0; } static long dma_heap_ioctl_allocate(struct file *file, void *data) { struct dma_heap_allocation_data *heap_allocation = data; struct dma_heap *heap = file->private_data; int fd; if (heap_allocation->fd) return -EINVAL; if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) return -EINVAL; if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) return -EINVAL; fd = dma_heap_buffer_alloc(heap, heap_allocation->len, heap_allocation->fd_flags, heap_allocation->heap_flags); if (fd < 0) return fd; heap_allocation->fd = fd; return 0; } static unsigned int dma_heap_ioctl_cmds[] = { DMA_HEAP_IOCTL_ALLOC, }; static long dma_heap_ioctl(struct file *file, unsigned int ucmd, unsigned long arg) { char stack_kdata[128]; char *kdata = stack_kdata; unsigned int kcmd; unsigned int in_size, out_size, drv_size, ksize; int nr = _IOC_NR(ucmd); int ret = 0; if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; /* Figure out the delta between user cmd size and kernel cmd size */ drv_size = _IOC_SIZE(kcmd); out_size = _IOC_SIZE(ucmd); in_size = out_size; if ((ucmd & kcmd & IOC_IN) == 0) in_size = 0; if ((ucmd & kcmd & IOC_OUT) == 0) out_size = 0; ksize = max(max(in_size, out_size), drv_size); /* If necessary, allocate buffer for ioctl argument */ if (ksize > sizeof(stack_kdata)) { kdata = kmalloc(ksize, GFP_KERNEL); if (!kdata) return -ENOMEM; } if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) { ret = -EFAULT; goto err; } /* zero out any difference between the kernel/user structure size */ if (ksize > in_size) memset(kdata + in_size, 0, ksize - in_size); switch (kcmd) { case DMA_HEAP_IOCTL_ALLOC: ret = dma_heap_ioctl_allocate(file, kdata); break; default: ret = -ENOTTY; goto err; } if (copy_to_user((void __user *)arg, kdata, out_size) != 0) ret = -EFAULT; err: if (kdata != stack_kdata) kfree(kdata); return ret; } static const struct file_operations dma_heap_fops = { .owner = THIS_MODULE, .open = dma_heap_open, .unlocked_ioctl = dma_heap_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = dma_heap_ioctl, #endif }; /** * dma_heap_get_drvdata - get per-heap driver data * @heap: DMA-Heap to retrieve private data for * * Returns: * The per-heap data for the heap. */ void *dma_heap_get_drvdata(struct dma_heap *heap) { return heap->priv; } /** * dma_heap_get_name - get heap name * @heap: DMA-Heap to retrieve the name of * * Returns: * The char* for the heap name. 
*/ const char *dma_heap_get_name(struct dma_heap *heap) { return heap->name; } /** * dma_heap_add - adds a heap to dmabuf heaps * @exp_info: information needed to register this heap */ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info) { struct dma_heap *heap, *h, *err_ret; struct device *dev_ret; unsigned int minor; int ret; if (!exp_info->name || !strcmp(exp_info->name, "")) { pr_err("dma_heap: Cannot add heap without a name\n"); return ERR_PTR(-EINVAL); } if (!exp_info->ops || !exp_info->ops->allocate) { pr_err("dma_heap: Cannot add heap with invalid ops struct\n"); return ERR_PTR(-EINVAL); } heap = kzalloc(sizeof(*heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); heap->name = exp_info->name; heap->ops = exp_info->ops; heap->priv = exp_info->priv; /* Find unused minor number */ ret = xa_alloc(&dma_heap_minors, &minor, heap, XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL); if (ret < 0) { pr_err("dma_heap: Unable to get minor number for heap\n"); err_ret = ERR_PTR(ret); goto err0; } /* Create device */ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor); cdev_init(&heap->heap_cdev, &dma_heap_fops); ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1); if (ret < 0) { pr_err("dma_heap: Unable to add char device\n"); err_ret = ERR_PTR(ret); goto err1; } dev_ret = device_create(dma_heap_class, NULL, heap->heap_devt, NULL, heap->name); if (IS_ERR(dev_ret)) { pr_err("dma_heap: Unable to create device\n"); err_ret = ERR_CAST(dev_ret); goto err2; } mutex_lock(&heap_list_lock); /* check the name is unique */ list_for_each_entry(h, &heap_list, list) { if (!strcmp(h->name, exp_info->name)) { mutex_unlock(&heap_list_lock); pr_err("dma_heap: Already registered heap named %s\n", exp_info->name); err_ret = ERR_PTR(-EINVAL); goto err3; } } /* Add heap to the list */ list_add(&heap->list, &heap_list); mutex_unlock(&heap_list_lock); return heap; err3: device_destroy(dma_heap_class, heap->heap_devt); err2: cdev_del(&heap->heap_cdev); err1: xa_erase(&dma_heap_minors, minor); err0: kfree(heap); return err_ret; } static char *dma_heap_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev)); } static int dma_heap_init(void) { int ret; ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME); if (ret) return ret; dma_heap_class = class_create(DEVNAME); if (IS_ERR(dma_heap_class)) { unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS); return PTR_ERR(dma_heap_class); } dma_heap_class->devnode = dma_heap_devnode; return 0; } subsys_initcall(dma_heap_init); |
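From userspace, every registered heap appears as a character device under /dev/dma_heap/, and a buffer is allocated with the DMA_HEAP_IOCTL_ALLOC ioctl handled above. A minimal sketch follows; the heap name "system" is an assumption (use whichever heaps your kernel exposes), and error handling is pared down:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
        struct dma_heap_allocation_data alloc = {
                .len = 1 << 20,                 /* 1 MiB; the kernel page-aligns this */
                .fd_flags = O_RDWR | O_CLOEXEC, /* flags for the returned dmabuf fd */
                /* .fd must be 0 on entry; no heap_flags are used here */
        };
        int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);

        if (heap_fd < 0 || ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc) < 0) {
                perror("dma-heap allocation");
                return 1;
        }

        printf("got dmabuf fd %u (%llu bytes)\n", alloc.fd,
               (unsigned long long)alloc.len);
        close(alloc.fd);
        close(heap_fd);
        return 0;
}

The returned descriptor is an ordinary dma-buf fd, so it can be passed to other drivers or (for heaps that support it) mmap()ed; closing it drops the buffer reference.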
// SPDX-License-Identifier: GPL-2.0-or-later /* * Security plug functions * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2016 Mellanox Technologies * Copyright (C) 2023 Microsoft Corporation <paul@paul-moore.com> */ #define pr_fmt(fmt) "LSM: " fmt #include <linux/bpf.h> #include <linux/capability.h> #include <linux/dcache.h> #include <linux/export.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kernel_read_file.h> #include <linux/lsm_hooks.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/personality.h> #include <linux/backing-dev.h> #include <linux/string.h> #include <linux/xattr.h> #include <linux/msg.h> #include <linux/overflow.h> #include <linux/perf_event.h> #include <linux/fs.h> #include <net/flow.h> #include <net/sock.h> #define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX /* * Identifier for the LSM static calls. * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT */ #define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX /* * Call the macro M for each LSM hook MAX_LSM_COUNT times. */ #define LSM_LOOP_UNROLL(M, ...) \ do { \ UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) \ } while (0) #define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__) /* * These are descriptions of the reasons that can be passed to the * security_locked_down() LSM hook. Placing this array here allows * all security modules to use the same descriptions for auditing * purposes.
*/ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = { [LOCKDOWN_NONE] = "none", [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading", [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port", [LOCKDOWN_EFI_TEST] = "/dev/efi_test access", [LOCKDOWN_KEXEC] = "kexec of unsigned images", [LOCKDOWN_HIBERNATION] = "hibernation", [LOCKDOWN_PCI_ACCESS] = "direct PCI access", [LOCKDOWN_IOPORT] = "raw io port access", [LOCKDOWN_MSR] = "raw MSR access", [LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables", [LOCKDOWN_DEVICE_TREE] = "modifying device tree contents", [LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage", [LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO", [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters", [LOCKDOWN_MMIOTRACE] = "unsafe mmio", [LOCKDOWN_DEBUGFS] = "debugfs access", [LOCKDOWN_XMON_WR] = "xmon write access", [LOCKDOWN_BPF_WRITE_USER] = "use of bpf to write user RAM", [LOCKDOWN_DBG_WRITE_KERNEL] = "use of kgdb/kdb to write kernel RAM", [LOCKDOWN_RTAS_ERROR_INJECTION] = "RTAS error injection", [LOCKDOWN_INTEGRITY_MAX] = "integrity", [LOCKDOWN_KCORE] = "/proc/kcore access", [LOCKDOWN_KPROBES] = "use of kprobes", [LOCKDOWN_BPF_READ_KERNEL] = "use of bpf to read kernel RAM", [LOCKDOWN_DBG_READ_KERNEL] = "use of kgdb/kdb to read kernel RAM", [LOCKDOWN_PERF] = "unsafe use of perf", [LOCKDOWN_TRACEFS] = "use of tracefs", [LOCKDOWN_XMON_RW] = "xmon read and write access", [LOCKDOWN_XFRM_SECRET] = "xfrm SA secret", [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality", }; static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain); static struct kmem_cache *lsm_file_cache; static struct kmem_cache *lsm_inode_cache; char *lsm_names; static struct lsm_blob_sizes blob_sizes __ro_after_init; /* Boot-time LSM user choice */ static __initdata const char *chosen_lsm_order; static __initdata const char *chosen_major_lsm; static __initconst const char *const builtin_lsm_order = CONFIG_LSM; /* Ordered list of LSMs to initialize. */ static __initdata struct lsm_info *ordered_lsms[MAX_LSM_COUNT + 1]; static __initdata struct lsm_info *exclusive; #ifdef CONFIG_HAVE_STATIC_CALL #define LSM_HOOK_TRAMP(NAME, NUM) \ &STATIC_CALL_TRAMP(LSM_STATIC_CALL(NAME, NUM)) #else #define LSM_HOOK_TRAMP(NAME, NUM) NULL #endif /* * Define static calls and static keys for each LSM hook. */ #define DEFINE_LSM_STATIC_CALL(NUM, NAME, RET, ...) \ DEFINE_STATIC_CALL_NULL(LSM_STATIC_CALL(NAME, NUM), \ *((RET(*)(__VA_ARGS__))NULL)); \ DEFINE_STATIC_KEY_FALSE(SECURITY_HOOK_ACTIVE_KEY(NAME, NUM)); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ LSM_DEFINE_UNROLL(DEFINE_LSM_STATIC_CALL, NAME, RET, __VA_ARGS__) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef DEFINE_LSM_STATIC_CALL /* * Initialise a table of static calls for each LSM hook. * DEFINE_STATIC_CALL_NULL invocation above generates a key (STATIC_CALL_KEY) * and a trampoline (STATIC_CALL_TRAMP) which are used to call * __static_call_update when updating the static call. * * The static calls table is used by early LSMs, some architectures can fault on * unaligned accesses and the fault handling code may not be ready by then. * Thus, the static calls table should be aligned to avoid any unhandled faults * in early init. 
*/ struct lsm_static_calls_table static_calls_table __ro_after_init __aligned(sizeof(u64)) = { #define INIT_LSM_STATIC_CALL(NUM, NAME) \ (struct lsm_static_call) { \ .key = &STATIC_CALL_KEY(LSM_STATIC_CALL(NAME, NUM)), \ .trampoline = LSM_HOOK_TRAMP(NAME, NUM), \ .active = &SECURITY_HOOK_ACTIVE_KEY(NAME, NUM), \ }, #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ .NAME = { \ LSM_DEFINE_UNROLL(INIT_LSM_STATIC_CALL, NAME) \ }, #include <linux/lsm_hook_defs.h> #undef LSM_HOOK #undef INIT_LSM_STATIC_CALL }; static __initdata bool debug; #define init_debug(...) \ do { \ if (debug) \ pr_info(__VA_ARGS__); \ } while (0) static bool __init is_enabled(struct lsm_info *lsm) { if (!lsm->enabled) return false; return *lsm->enabled; } /* Mark an LSM's enabled flag. */ static int lsm_enabled_true __initdata = 1; static int lsm_enabled_false __initdata = 0; static void __init set_enabled(struct lsm_info *lsm, bool enabled) { /* * When an LSM hasn't configured an enable variable, we can use * a hard-coded location for storing the default enabled state. */ if (!lsm->enabled) { if (enabled) lsm->enabled = &lsm_enabled_true; else lsm->enabled = &lsm_enabled_false; } else if (lsm->enabled == &lsm_enabled_true) { if (!enabled) lsm->enabled = &lsm_enabled_false; } else if (lsm->enabled == &lsm_enabled_false) { if (enabled) lsm->enabled = &lsm_enabled_true; } else { *lsm->enabled = enabled; } } /* Is an LSM already listed in the ordered LSMs list? */ static bool __init exists_ordered_lsm(struct lsm_info *lsm) { struct lsm_info **check; for (check = ordered_lsms; *check; check++) if (*check == lsm) return true; return false; } /* Append an LSM to the list of ordered LSMs to initialize. */ static int last_lsm __initdata; static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from) { /* Ignore duplicate selections. */ if (exists_ordered_lsm(lsm)) return; if (WARN(last_lsm == MAX_LSM_COUNT, "%s: out of LSM static calls!?\n", from)) return; /* Enable this LSM, if it is not already set. */ if (!lsm->enabled) lsm->enabled = &lsm_enabled_true; ordered_lsms[last_lsm++] = lsm; init_debug("%s ordered: %s (%s)\n", from, lsm->name, is_enabled(lsm) ? "enabled" : "disabled"); } /* Is an LSM allowed to be initialized? */ static bool __init lsm_allowed(struct lsm_info *lsm) { /* Skip if the LSM is disabled. */ if (!is_enabled(lsm)) return false; /* Not allowed if another exclusive LSM already initialized. */ if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) { init_debug("exclusive disabled: %s\n", lsm->name); return false; } return true; } static void __init lsm_set_blob_size(int *need, int *lbs) { int offset; if (*need <= 0) return; offset = ALIGN(*lbs, sizeof(void *)); *lbs = offset + *need; *need = offset; } static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed) { if (!needed) return; lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred); lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file); lsm_set_blob_size(&needed->lbs_ib, &blob_sizes.lbs_ib); /* * The inode blob gets an rcu_head in addition to * what the modules might need. 
*/ if (needed->lbs_inode && blob_sizes.lbs_inode == 0) blob_sizes.lbs_inode = sizeof(struct rcu_head); lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode); lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc); lsm_set_blob_size(&needed->lbs_key, &blob_sizes.lbs_key); lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg); lsm_set_blob_size(&needed->lbs_perf_event, &blob_sizes.lbs_perf_event); lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock); lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock); lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task); lsm_set_blob_size(&needed->lbs_tun_dev, &blob_sizes.lbs_tun_dev); lsm_set_blob_size(&needed->lbs_xattr_count, &blob_sizes.lbs_xattr_count); lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev); } /* Prepare LSM for initialization. */ static void __init prepare_lsm(struct lsm_info *lsm) { int enabled = lsm_allowed(lsm); /* Record enablement (to handle any following exclusive LSMs). */ set_enabled(lsm, enabled); /* If enabled, do pre-initialization work. */ if (enabled) { if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) { exclusive = lsm; init_debug("exclusive chosen: %s\n", lsm->name); } lsm_set_blob_sizes(lsm->blobs); } } /* Initialize a given LSM, if it is enabled. */ static void __init initialize_lsm(struct lsm_info *lsm) { if (is_enabled(lsm)) { int ret; init_debug("initializing %s\n", lsm->name); ret = lsm->init(); WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret); } } /* * Current index to use while initializing the lsm id list. */ u32 lsm_active_cnt __ro_after_init; const struct lsm_id *lsm_idlist[MAX_LSM_COUNT]; /* Populate ordered LSMs list from comma-separated LSM name list. */ static void __init ordered_lsm_parse(const char *order, const char *origin) { struct lsm_info *lsm; char *sep, *name, *next; /* LSM_ORDER_FIRST is always first. */ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (lsm->order == LSM_ORDER_FIRST) append_ordered_lsm(lsm, " first"); } /* Process "security=", if given. */ if (chosen_major_lsm) { struct lsm_info *major; /* * To match the original "security=" behavior, this * explicitly does NOT fallback to another Legacy Major * if the selected one was separately disabled: disable * all non-matching Legacy Major LSMs. */ for (major = __start_lsm_info; major < __end_lsm_info; major++) { if ((major->flags & LSM_FLAG_LEGACY_MAJOR) && strcmp(major->name, chosen_major_lsm) != 0) { set_enabled(major, false); init_debug("security=%s disabled: %s (only one legacy major LSM)\n", chosen_major_lsm, major->name); } } } sep = kstrdup(order, GFP_KERNEL); next = sep; /* Walk the list, looking for matching LSMs. */ while ((name = strsep(&next, ",")) != NULL) { bool found = false; for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (strcmp(lsm->name, name) == 0) { if (lsm->order == LSM_ORDER_MUTABLE) append_ordered_lsm(lsm, origin); found = true; } } if (!found) init_debug("%s ignored: %s (not built into kernel)\n", origin, name); } /* Process "security=", if given. */ if (chosen_major_lsm) { for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (exists_ordered_lsm(lsm)) continue; if (strcmp(lsm->name, chosen_major_lsm) == 0) append_ordered_lsm(lsm, "security="); } } /* LSM_ORDER_LAST is always last. */ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (lsm->order == LSM_ORDER_LAST) append_ordered_lsm(lsm, " last"); } /* Disable all LSMs not in the ordered list. 
*/ for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) { if (exists_ordered_lsm(lsm)) continue; set_enabled(lsm, false); init_debug("%s skipped: %s (not in requested order)\n", origin, lsm->name); } kfree(sep); } static void __init lsm_static_call_init(struct security_hook_list *hl) { struct lsm_static_call *scall = hl->scalls; int i; for (i = 0; i < MAX_LSM_COUNT; i++) { /* Update the first static call that is not used yet */ if (!scall->hl) { __static_call_update(scall->key, scall->trampoline, hl->hook.lsm_func_addr); scall->hl = hl; static_branch_enable(scall->active); return; } scall++; } panic("%s - Ran out of static slots.\n", __func__); } static void __init lsm_early_cred(struct cred *cred); static void __init lsm_early_task(struct task_struct *task); static int lsm_append(const char *new, char **result); static void __init report_lsm_order(void) { struct lsm_info **lsm, *early; int first = 0; pr_info("initializing lsm="); /* Report each enabled LSM name, comma separated. */ for (early = __start_early_lsm_info; early < __end_early_lsm_info; early++) if (is_enabled(early)) pr_cont("%s%s", first++ == 0 ? "" : ",", early->name); for (lsm = ordered_lsms; *lsm; lsm++) if (is_enabled(*lsm)) pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name); pr_cont("\n"); } static void __init ordered_lsm_init(void) { struct lsm_info **lsm; if (chosen_lsm_order) { if (chosen_major_lsm) { pr_warn("security=%s is ignored because it is superseded by lsm=%s\n", chosen_major_lsm, chosen_lsm_order); chosen_major_lsm = NULL; } ordered_lsm_parse(chosen_lsm_order, "cmdline"); } else ordered_lsm_parse(builtin_lsm_order, "builtin"); for (lsm = ordered_lsms; *lsm; lsm++) prepare_lsm(*lsm); report_lsm_order(); init_debug("cred blob size = %d\n", blob_sizes.lbs_cred); init_debug("file blob size = %d\n", blob_sizes.lbs_file); init_debug("ib blob size = %d\n", blob_sizes.lbs_ib); init_debug("inode blob size = %d\n", blob_sizes.lbs_inode); init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc); #ifdef CONFIG_KEYS init_debug("key blob size = %d\n", blob_sizes.lbs_key); #endif /* CONFIG_KEYS */ init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg); init_debug("sock blob size = %d\n", blob_sizes.lbs_sock); init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock); init_debug("perf event blob size = %d\n", blob_sizes.lbs_perf_event); init_debug("task blob size = %d\n", blob_sizes.lbs_task); init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev); init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count); init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev); /* * Create any kmem_caches needed for blobs */ if (blob_sizes.lbs_file) lsm_file_cache = kmem_cache_create("lsm_file_cache", blob_sizes.lbs_file, 0, SLAB_PANIC, NULL); if (blob_sizes.lbs_inode) lsm_inode_cache = kmem_cache_create("lsm_inode_cache", blob_sizes.lbs_inode, 0, SLAB_PANIC, NULL); lsm_early_cred((struct cred *) current->cred); lsm_early_task(current); for (lsm = ordered_lsms; *lsm; lsm++) initialize_lsm(*lsm); } int __init early_security_init(void) { struct lsm_info *lsm; for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { if (!lsm->enabled) lsm->enabled = &lsm_enabled_true; prepare_lsm(lsm); initialize_lsm(lsm); } return 0; } /** * security_init - initializes the security framework * * This should be called early in the kernel initialization sequence. */ int __init security_init(void) { struct lsm_info *lsm; init_debug("legacy security=%s\n", chosen_major_lsm ? 
: " *unspecified*"); init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order); init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*"); /* * Append the names of the early LSM modules now that kmalloc() is * available */ for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) { init_debug(" early started: %s (%s)\n", lsm->name, is_enabled(lsm) ? "enabled" : "disabled"); if (lsm->enabled) lsm_append(lsm->name, &lsm_names); } /* Load LSMs in specified order. */ ordered_lsm_init(); return 0; } /* Save user chosen LSM */ static int __init choose_major_lsm(char *str) { chosen_major_lsm = str; return 1; } __setup("security=", choose_major_lsm); /* Explicitly choose LSM initialization order. */ static int __init choose_lsm_order(char *str) { chosen_lsm_order = str; return 1; } __setup("lsm=", choose_lsm_order); /* Enable LSM order debugging. */ static int __init enable_debug(char *str) { debug = true; return 1; } __setup("lsm.debug", enable_debug); static bool match_last_lsm(const char *list, const char *lsm) { const char *last; if (WARN_ON(!list || !lsm)) return false; last = strrchr(list, ','); if (last) /* Pass the comma, strcmp() will check for '\0' */ last++; else last = list; return !strcmp(last, lsm); } static int lsm_append(const char *new, char **result) { char *cp; if (*result == NULL) { *result = kstrdup(new, GFP_KERNEL); if (*result == NULL) return -ENOMEM; } else { /* Check if it is the last registered name */ if (match_last_lsm(*result, new)) return 0; cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new); if (cp == NULL) return -ENOMEM; kfree(*result); *result = cp; } return 0; } /** * security_add_hooks - Add a modules hooks to the hook lists. * @hooks: the hooks to add * @count: the number of hooks to add * @lsmid: the identification information for the security module * * Each LSM has to register its hooks with the infrastructure. */ void __init security_add_hooks(struct security_hook_list *hooks, int count, const struct lsm_id *lsmid) { int i; /* * A security module may call security_add_hooks() more * than once during initialization, and LSM initialization * is serialized. Landlock is one such case. * Look at the previous entry, if there is one, for duplication. */ if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) { if (lsm_active_cnt >= MAX_LSM_COUNT) panic("%s Too many LSMs registered.\n", __func__); lsm_idlist[lsm_active_cnt++] = lsmid; } for (i = 0; i < count; i++) { hooks[i].lsmid = lsmid; lsm_static_call_init(&hooks[i]); } /* * Don't try to append during early_security_init(), we'll come back * and fix this up afterwards. 
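 *
 * For reference, a module typically reaches this function from its own
 * init routine via something like the following sketch (all "foo" names
 * and the LSM id value are hypothetical, not part of this file):
 *
 *	static struct security_hook_list foo_hooks[] __ro_after_init = {
 *		LSM_HOOK_INIT(ptrace_access_check, foo_ptrace_access_check),
 *		LSM_HOOK_INIT(inode_permission, foo_inode_permission),
 *	};
 *
 *	static const struct lsm_id foo_lsmid = {
 *		.name = "foo",
 *		.id = LSM_ID_UNDEF,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		security_add_hooks(foo_hooks, ARRAY_SIZE(foo_hooks), &foo_lsmid);
 *		return 0;
 *	}
 *
 *	DEFINE_LSM(foo) = {
 *		.name = "foo",
 *		.init = foo_init,
 *	};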
*/ if (slab_is_available()) { if (lsm_append(lsmid->name, &lsm_names) < 0) panic("%s - Cannot get early memory.\n", __func__); } } int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return blocking_notifier_call_chain(&blocking_lsm_notifier_chain, event, data); } EXPORT_SYMBOL(call_blocking_lsm_notifier); int register_blocking_lsm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&blocking_lsm_notifier_chain, nb); } EXPORT_SYMBOL(register_blocking_lsm_notifier); int unregister_blocking_lsm_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain, nb); } EXPORT_SYMBOL(unregister_blocking_lsm_notifier); /** * lsm_blob_alloc - allocate a composite blob * @dest: the destination for the blob * @size: the size of the blob * @gfp: allocation type * * Allocate a blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp) { if (size == 0) { *dest = NULL; return 0; } *dest = kzalloc(size, gfp); if (*dest == NULL) return -ENOMEM; return 0; } /** * lsm_cred_alloc - allocate a composite cred blob * @cred: the cred that needs a blob * @gfp: allocation type * * Allocate the cred blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_cred_alloc(struct cred *cred, gfp_t gfp) { return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp); } /** * lsm_early_cred - during initialization allocate a composite cred blob * @cred: the cred that needs a blob * * Allocate the cred blob for all the modules */ static void __init lsm_early_cred(struct cred *cred) { int rc = lsm_cred_alloc(cred, GFP_KERNEL); if (rc) panic("%s: Early cred alloc failed.\n", __func__); } /** * lsm_file_alloc - allocate a composite file blob * @file: the file that needs a blob * * Allocate the file blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_file_alloc(struct file *file) { if (!lsm_file_cache) { file->f_security = NULL; return 0; } file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL); if (file->f_security == NULL) return -ENOMEM; return 0; } /** * lsm_inode_alloc - allocate a composite inode blob * @inode: the inode that needs a blob * @gfp: allocation flags * * Allocate the inode blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_inode_alloc(struct inode *inode, gfp_t gfp) { if (!lsm_inode_cache) { inode->i_security = NULL; return 0; } inode->i_security = kmem_cache_zalloc(lsm_inode_cache, gfp); if (inode->i_security == NULL) return -ENOMEM; return 0; } /** * lsm_task_alloc - allocate a composite task blob * @task: the task that needs a blob * * Allocate the task blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_task_alloc(struct task_struct *task) { return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL); } /** * lsm_ipc_alloc - allocate a composite ipc blob * @kip: the ipc that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_ipc_alloc(struct kern_ipc_perm *kip) { return lsm_blob_alloc(&kip->security, blob_sizes.lbs_ipc, GFP_KERNEL); } #ifdef CONFIG_KEYS /** * lsm_key_alloc - allocate a composite key blob * @key: the key that needs a blob * * Allocate the key blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. 
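 *
 * Note that each *_alloc helper here hands out one shared blob per object;
 * every module locates its own slice through the offset recorded in its
 * struct lsm_blob_sizes by lsm_set_blob_sizes(). A minimal, hypothetical
 * accessor (the "foo" names are not part of this file):
 *
 *	static inline struct foo_key_sec *foo_key(const struct key *key)
 *	{
 *		return key->security + foo_blob_sizes.lbs_key;
 *	}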
*/ static int lsm_key_alloc(struct key *key) { return lsm_blob_alloc(&key->security, blob_sizes.lbs_key, GFP_KERNEL); } #endif /* CONFIG_KEYS */ /** * lsm_msg_msg_alloc - allocate a composite msg_msg blob * @mp: the msg_msg that needs a blob * * Allocate the ipc blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_msg_msg_alloc(struct msg_msg *mp) { return lsm_blob_alloc(&mp->security, blob_sizes.lbs_msg_msg, GFP_KERNEL); } /** * lsm_bdev_alloc - allocate a composite block_device blob * @bdev: the block_device that needs a blob * * Allocate the block_device blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_bdev_alloc(struct block_device *bdev) { if (blob_sizes.lbs_bdev == 0) { bdev->bd_security = NULL; return 0; } bdev->bd_security = kzalloc(blob_sizes.lbs_bdev, GFP_KERNEL); if (!bdev->bd_security) return -ENOMEM; return 0; } /** * lsm_early_task - during initialization allocate a composite task blob * @task: the task that needs a blob * * Allocate the task blob for all the modules */ static void __init lsm_early_task(struct task_struct *task) { int rc = lsm_task_alloc(task); if (rc) panic("%s: Early task alloc failed.\n", __func__); } /** * lsm_superblock_alloc - allocate a composite superblock blob * @sb: the superblock that needs a blob * * Allocate the superblock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_superblock_alloc(struct super_block *sb) { return lsm_blob_alloc(&sb->s_security, blob_sizes.lbs_superblock, GFP_KERNEL); } /** * lsm_fill_user_ctx - Fill a user space lsm_ctx structure * @uctx: a userspace LSM context to be filled * @uctx_len: available uctx size (input), used uctx size (output) * @val: the new LSM context value * @val_len: the size of the new LSM context value * @id: LSM id * @flags: LSM defined flags * * Fill all of the fields in a userspace lsm_ctx structure. If @uctx is NULL * simply calculate the required size to output via @utc_len and return * success. * * Returns 0 on success, -E2BIG if userspace buffer is not large enough, * -EFAULT on a copyout error, -ENOMEM if memory can't be allocated. */ int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, void *val, size_t val_len, u64 id, u64 flags) { struct lsm_ctx *nctx = NULL; size_t nctx_len; int rc = 0; nctx_len = ALIGN(struct_size(nctx, ctx, val_len), sizeof(void *)); if (nctx_len > *uctx_len) { rc = -E2BIG; goto out; } /* no buffer - return success/0 and set @uctx_len to the req size */ if (!uctx) goto out; nctx = kzalloc(nctx_len, GFP_KERNEL); if (nctx == NULL) { rc = -ENOMEM; goto out; } nctx->id = id; nctx->flags = flags; nctx->len = nctx_len; nctx->ctx_len = val_len; memcpy(nctx->ctx, val, val_len); if (copy_to_user(uctx, nctx, nctx_len)) rc = -EFAULT; out: kfree(nctx); *uctx_len = nctx_len; return rc; } /* * The default value of the LSM hook is defined in linux/lsm_hook_defs.h and * can be accessed with: * * LSM_RET_DEFAULT(<hook_name>) * * The macros below define static constants for the default value of each * LSM hook. */ #define LSM_RET_DEFAULT(NAME) (NAME##_default) #define DECLARE_LSM_RET_DEFAULT_void(DEFAULT, NAME) #define DECLARE_LSM_RET_DEFAULT_int(DEFAULT, NAME) \ static const int __maybe_unused LSM_RET_DEFAULT(NAME) = (DEFAULT); #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ DECLARE_LSM_RET_DEFAULT_##RET(DEFAULT, NAME) #include <linux/lsm_hook_defs.h> #undef LSM_HOOK /* * Hook list operation macros. 
* * call_void_hook: * This is a hook that does not return a value. * * call_int_hook: * This is a hook that returns a value. */ #define __CALL_STATIC_VOID(NUM, HOOK, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ } \ } while (0); #define call_void_hook(HOOK, ...) \ do { \ LSM_LOOP_UNROLL(__CALL_STATIC_VOID, HOOK, __VA_ARGS__); \ } while (0) #define __CALL_STATIC_INT(NUM, R, HOOK, LABEL, ...) \ do { \ if (static_branch_unlikely(&SECURITY_HOOK_ACTIVE_KEY(HOOK, NUM))) { \ R = static_call(LSM_STATIC_CALL(HOOK, NUM))(__VA_ARGS__); \ if (R != LSM_RET_DEFAULT(HOOK)) \ goto LABEL; \ } \ } while (0); #define call_int_hook(HOOK, ...) \ ({ \ __label__ OUT; \ int RC = LSM_RET_DEFAULT(HOOK); \ \ LSM_LOOP_UNROLL(__CALL_STATIC_INT, RC, HOOK, OUT, __VA_ARGS__); \ OUT: \ RC; \ }) #define lsm_for_each_hook(scall, NAME) \ for (scall = static_calls_table.NAME; \ scall - static_calls_table.NAME < MAX_LSM_COUNT; scall++) \ if (static_key_enabled(&scall->active->key)) /* Security operations */ /** * security_binder_set_context_mgr() - Check if becoming binder ctx mgr is ok * @mgr: task credentials of current binder process * * Check whether @mgr is allowed to be the binder context manager. * * Return: Return 0 if permission is granted. */ int security_binder_set_context_mgr(const struct cred *mgr) { return call_int_hook(binder_set_context_mgr, mgr); } /** * security_binder_transaction() - Check if a binder transaction is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to invoke a binder transaction call to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transaction(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transaction, from, to); } /** * security_binder_transfer_binder() - Check if a binder transfer is allowed * @from: sending process * @to: receiving process * * Check whether @from is allowed to transfer a binder reference to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_binder(const struct cred *from, const struct cred *to) { return call_int_hook(binder_transfer_binder, from, to); } /** * security_binder_transfer_file() - Check if a binder file xfer is allowed * @from: sending process * @to: receiving process * @file: file being transferred * * Check whether @from is allowed to transfer @file to @to. * * Return: Returns 0 if permission is granted. */ int security_binder_transfer_file(const struct cred *from, const struct cred *to, const struct file *file) { return call_int_hook(binder_transfer_file, from, to, file); } /** * security_ptrace_access_check() - Check if tracing is allowed * @child: target process * @mode: PTRACE_MODE flags * * Check permission before allowing the current process to trace the @child * process. Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of tracing check * during an execve in the bprm_set_creds hook of binprm_security_ops if the * process is being traced and its security attributes would be changed by the * execve. * * Return: Returns 0 if permission is granted. 
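 *
 * As with every call_int_hook() based wrapper, the first module whose hook
 * returns something other than the hook's default value (0 here) ends the
 * walk and that value is returned to the caller. A minimal module-side
 * sketch (hypothetical "foo" names, not part of this file):
 *
 *	static int foo_ptrace_access_check(struct task_struct *child,
 *					   unsigned int mode)
 *	{
 *		if ((mode & PTRACE_MODE_ATTACH) && foo_ptrace_restricted(child))
 *			return -EPERM;
 *		return 0;
 *	}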
*/ int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { return call_int_hook(ptrace_access_check, child, mode); } /** * security_ptrace_traceme() - Check if tracing is allowed * @parent: tracing process * * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself to the * @parent process for tracing. * * Return: Returns 0 if permission is granted. */ int security_ptrace_traceme(struct task_struct *parent) { return call_int_hook(ptrace_traceme, parent); } /** * security_capget() - Get the capability sets for a process * @target: target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Get the @effective, @inheritable, and @permitted capability sets for the * @target process. The hook may also perform permission checking to determine * if the current process is allowed to see the capability sets of the @target * process. * * Return: Returns 0 if the capability sets were successfully obtained. */ int security_capget(const struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { return call_int_hook(capget, target, effective, inheritable, permitted); } /** * security_capset() - Set the capability sets for a process * @new: new credentials for the target process * @old: current credentials of the target process * @effective: effective capability set * @inheritable: inheritable capability set * @permitted: permitted capability set * * Set the @effective, @inheritable, and @permitted capability sets for the * current process. * * Return: Returns 0 and update @new if permission is granted. */ int security_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { return call_int_hook(capset, new, old, effective, inheritable, permitted); } /** * security_capable() - Check if a process has the necessary capability * @cred: credentials to examine * @ns: user namespace * @cap: capability requested * @opts: capability check options * * Check whether the @tsk process has the @cap capability in the indicated * credentials. @cap contains the capability <include/linux/capability.h>. * @opts contains options for the capable check <include/linux/security.h>. * * Return: Returns 0 if the capability is granted. */ int security_capable(const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) { return call_int_hook(capable, cred, ns, cap, opts); } /** * security_quotactl() - Check if a quotactl() syscall is allowed for this fs * @cmds: commands * @type: type * @id: id * @sb: filesystem * * Check whether the quotactl syscall is allowed for this @sb. * * Return: Returns 0 if permission is granted. */ int security_quotactl(int cmds, int type, int id, const struct super_block *sb) { return call_int_hook(quotactl, cmds, type, id, sb); } /** * security_quota_on() - Check if QUOTAON is allowed for a dentry * @dentry: dentry * * Check whether QUOTAON is allowed for @dentry. * * Return: Returns 0 if permission is granted. */ int security_quota_on(struct dentry *dentry) { return call_int_hook(quota_on, dentry); } /** * security_syslog() - Check if accessing the kernel message ring is allowed * @type: SYSLOG_ACTION_* type * * Check permission before accessing the kernel message ring or changing * logging to the console. 
See the syslog(2) manual page for an explanation of * the @type values. * * Return: Return 0 if permission is granted. */ int security_syslog(int type) { return call_int_hook(syslog, type); } /** * security_settime64() - Check if changing the system time is allowed * @ts: new time * @tz: timezone * * Check permission to change the system time, struct timespec64 is defined in * <include/linux/time64.h> and timezone is defined in <include/linux/time.h>. * * Return: Returns 0 if permission is granted. */ int security_settime64(const struct timespec64 *ts, const struct timezone *tz) { return call_int_hook(settime, ts, tz); } /** * security_vm_enough_memory_mm() - Check if allocating a new mem map is allowed * @mm: mm struct * @pages: number of pages * * Check permissions for allocating a new virtual mapping. If all LSMs return * a positive value, __vm_enough_memory() will be called with cap_sys_admin * set. If at least one LSM returns 0 or negative, __vm_enough_memory() will be * called with cap_sys_admin cleared. * * Return: Returns 0 if permission is granted by the LSM infrastructure to the * caller. */ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { struct lsm_static_call *scall; int cap_sys_admin = 1; int rc; /* * The module will respond with 0 if it thinks the __vm_enough_memory() * call should be made with the cap_sys_admin set. If all of the modules * agree that it should be set it will. If any module thinks it should * not be set it won't. */ lsm_for_each_hook(scall, vm_enough_memory) { rc = scall->hl->hook.vm_enough_memory(mm, pages); if (rc < 0) { cap_sys_admin = 0; break; } } return __vm_enough_memory(mm, pages, cap_sys_admin); } /** * security_bprm_creds_for_exec() - Prepare the credentials for exec() * @bprm: binary program information * * If the setup in prepare_exec_creds did not setup @bprm->cred->security * properly for executing @bprm->file, update the LSM's portion of * @bprm->cred->security to be what commit_creds needs to install for the new * program. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. @bprm * contains the linux_binprm structure. * * If execveat(2) is called with the AT_EXECVE_CHECK flag, bprm->is_check is * set. The result must be the same as without this flag even if the execution * will never really happen and @bprm will always be dropped. * * This hook must not change current->cred, only @bprm->cred. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_for_exec(struct linux_binprm *bprm) { return call_int_hook(bprm_creds_for_exec, bprm); } /** * security_bprm_creds_from_file() - Update linux_binprm creds based on file * @bprm: binary program information * @file: associated file * * If @file is setpcap, suid, sgid or otherwise marked to change privilege upon * exec, update @bprm->cred to reflect that change. This is called after * finding the binary that will be executed without an interpreter. This * ensures that the credentials will not be derived from a script that the * binary will need to reopen, which when reopend may end up being a completely * different file. This hook may also optionally check permissions (e.g. for * transitions between security domains). The hook must set @bprm->secureexec * to 1 if AT_SECURE should be set to request libc enable secure mode. 
The * hook must add to @bprm->per_clear any personality flags that should be * cleared from current->personality. @bprm contains the linux_binprm * structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file) { return call_int_hook(bprm_creds_from_file, bprm, file); } /** * security_bprm_check() - Mediate binary handler search * @bprm: binary program information * * This hook mediates the point when a search for a binary handler will begin. * It allows a check against the @bprm->cred->security value which was set in * the preceding creds_for_exec call. The argv list and envp list are reliably * available in @bprm. This hook may be called multiple times during a single * execve. @bprm contains the linux_binprm structure. * * Return: Returns 0 if the hook is successful and permission is granted. */ int security_bprm_check(struct linux_binprm *bprm) { return call_int_hook(bprm_check_security, bprm); } /** * security_bprm_committing_creds() - Install creds for a process during exec() * @bprm: binary program information * * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials pointed to * by @current->cred and the information set in @bprm->cred by the * bprm_creds_for_exec hook. @bprm points to the linux_binprm structure. This * hook is a good place to perform state changes on the process such as closing * open file descriptors to which access will no longer be granted when the * attributes are changed. This is called immediately before commit_creds(). */ void security_bprm_committing_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committing_creds, bprm); } /** * security_bprm_committed_creds() - Tidy up after cred install during exec() * @bprm: binary program information * * Tidy up after the installation of the new security attributes of a process * being transformed by an execve operation. The new credentials have, by this * point, been set to @current->cred. @bprm points to the linux_binprm * structure. This hook is a good place to perform state changes on the * process such as clearing out non-inheritable signal state. This is called * immediately after commit_creds(). */ void security_bprm_committed_creds(const struct linux_binprm *bprm) { call_void_hook(bprm_committed_creds, bprm); } /** * security_fs_context_submount() - Initialise fc->security * @fc: new filesystem context * @reference: dentry reference for submount/remount * * Fill out the ->security field for a new fs_context. * * Return: Returns 0 on success or negative error code on failure. */ int security_fs_context_submount(struct fs_context *fc, struct super_block *reference) { return call_int_hook(fs_context_submount, fc, reference); } /** * security_fs_context_dup() - Duplicate a fs_context LSM blob * @fc: destination filesystem context * @src_fc: source filesystem context * * Allocate and attach a security structure to sc->security. This pointer is * initialised to NULL by the caller. @fc indicates the new filesystem context. * @src_fc indicates the original filesystem context. * * Return: Returns 0 on success or a negative error code on failure. 
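 *
 * A minimal module-side sketch (the "foo" context structure and names are
 * hypothetical, not part of this file):
 *
 *	static int foo_fs_context_dup(struct fs_context *fc,
 *				      struct fs_context *src_fc)
 *	{
 *		if (!src_fc->security)
 *			return 0;
 *		fc->security = kmemdup(src_fc->security,
 *				       sizeof(struct foo_fs_context),
 *				       GFP_KERNEL);
 *		return fc->security ? 0 : -ENOMEM;
 *	}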
*/ int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { return call_int_hook(fs_context_dup, fc, src_fc); } /** * security_fs_context_parse_param() - Configure a filesystem context * @fc: filesystem context * @param: filesystem parameter * * Userspace provided a parameter to configure a superblock. The LSM can * consume the parameter or return it to the caller for use elsewhere. * * Return: If the parameter is used by the LSM it should return 0, if it is * returned to the caller -ENOPARAM is returned, otherwise a negative * error code is returned. */ int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct lsm_static_call *scall; int trc; int rc = -ENOPARAM; lsm_for_each_hook(scall, fs_context_parse_param) { trc = scall->hl->hook.fs_context_parse_param(fc, param); if (trc == 0) rc = 0; else if (trc != -ENOPARAM) return trc; } return rc; } /** * security_sb_alloc() - Allocate a super_block LSM blob * @sb: filesystem superblock * * Allocate and attach a security structure to the sb->s_security field. The * s_security field is initialized to NULL when the structure is allocated. * @sb contains the super_block structure to be modified. * * Return: Returns 0 if operation was successful. */ int security_sb_alloc(struct super_block *sb) { int rc = lsm_superblock_alloc(sb); if (unlikely(rc)) return rc; rc = call_int_hook(sb_alloc_security, sb); if (unlikely(rc)) security_sb_free(sb); return rc; } /** * security_sb_delete() - Release super_block LSM associated objects * @sb: filesystem superblock * * Release objects tied to a superblock (e.g. inodes). @sb contains the * super_block structure being released. */ void security_sb_delete(struct super_block *sb) { call_void_hook(sb_delete, sb); } /** * security_sb_free() - Free a super_block LSM blob * @sb: filesystem superblock * * Deallocate and clear the sb->s_security field. @sb contains the super_block * structure to be modified. */ void security_sb_free(struct super_block *sb) { call_void_hook(sb_free_security, sb); kfree(sb->s_security); sb->s_security = NULL; } /** * security_free_mnt_opts() - Free memory associated with mount options * @mnt_opts: LSM processed mount options * * Free memory associated with @mnt_ops. */ void security_free_mnt_opts(void **mnt_opts) { if (!*mnt_opts) return; call_void_hook(sb_free_mnt_opts, *mnt_opts); *mnt_opts = NULL; } EXPORT_SYMBOL(security_free_mnt_opts); /** * security_sb_eat_lsm_opts() - Consume LSM mount options * @options: mount options * @mnt_opts: LSM processed mount options * * Eat (scan @options) and save them in @mnt_opts. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_eat_lsm_opts(char *options, void **mnt_opts) { return call_int_hook(sb_eat_lsm_opts, options, mnt_opts); } EXPORT_SYMBOL(security_sb_eat_lsm_opts); /** * security_sb_mnt_opts_compat() - Check if new mount options are allowed * @sb: filesystem superblock * @mnt_opts: new mount options * * Determine if the new mount options in @mnt_opts are allowed given the * existing mounted filesystem at @sb. @sb superblock being compared. * * Return: Returns 0 if options are compatible. 
*/ int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_mnt_opts_compat, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_mnt_opts_compat); /** * security_sb_remount() - Verify no incompatible mount changes during remount * @sb: filesystem superblock * @mnt_opts: (re)mount options * * Extracts security system specific mount options and verifies no changes are * being made to those options. * * Return: Returns 0 if permission is granted. */ int security_sb_remount(struct super_block *sb, void *mnt_opts) { return call_int_hook(sb_remount, sb, mnt_opts); } EXPORT_SYMBOL(security_sb_remount); /** * security_sb_kern_mount() - Check if a kernel mount is allowed * @sb: filesystem superblock * * Mount this @sb if allowed by permissions. * * Return: Returns 0 if permission is granted. */ int security_sb_kern_mount(const struct super_block *sb) { return call_int_hook(sb_kern_mount, sb); } /** * security_sb_show_options() - Output the mount options for a superblock * @m: output file * @sb: filesystem superblock * * Show (print on @m) mount options for this @sb. * * Return: Returns 0 on success, negative values on failure. */ int security_sb_show_options(struct seq_file *m, struct super_block *sb) { return call_int_hook(sb_show_options, m, sb); } /** * security_sb_statfs() - Check if accessing fs stats is allowed * @dentry: superblock handle * * Check permission before obtaining filesystem statistics for the @mnt * mountpoint. @dentry is a handle on the superblock for the filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_statfs(struct dentry *dentry) { return call_int_hook(sb_statfs, dentry); } /** * security_sb_mount() - Check permission for mounting a filesystem * @dev_name: filesystem backing device * @path: mount point * @type: filesystem type * @flags: mount flags * @data: filesystem specific data * * Check permission before an object specified by @dev_name is mounted on the * mount point named by @nd. For an ordinary mount, @dev_name identifies a * device if the file system type requires a device. For a remount * (@flags & MS_REMOUNT), @dev_name is irrelevant. For a loopback/bind mount * (@flags & MS_BIND), @dev_name identifies the pathname of the object being * mounted. * * Return: Returns 0 if permission is granted. */ int security_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return call_int_hook(sb_mount, dev_name, path, type, flags, data); } /** * security_sb_umount() - Check permission for unmounting a filesystem * @mnt: mounted filesystem * @flags: unmount flags * * Check permission before the @mnt file system is unmounted. * * Return: Returns 0 if permission is granted. */ int security_sb_umount(struct vfsmount *mnt, int flags) { return call_int_hook(sb_umount, mnt, flags); } /** * security_sb_pivotroot() - Check permissions for pivoting the rootfs * @old_path: new location for current rootfs * @new_path: location of the new rootfs * * Check permission before pivoting the root filesystem. * * Return: Returns 0 if permission is granted. */ int security_sb_pivotroot(const struct path *old_path, const struct path *new_path) { return call_int_hook(sb_pivotroot, old_path, new_path); } /** * security_sb_set_mnt_opts() - Set the mount options for a filesystem * @sb: filesystem superblock * @mnt_opts: binary mount options * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Set the security relevant mount options used for a superblock. 
* * Return: Returns 0 on success, error on failure. */ int security_sb_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { struct lsm_static_call *scall; int rc = mnt_opts ? -EOPNOTSUPP : LSM_RET_DEFAULT(sb_set_mnt_opts); lsm_for_each_hook(scall, sb_set_mnt_opts) { rc = scall->hl->hook.sb_set_mnt_opts(sb, mnt_opts, kern_flags, set_kern_flags); if (rc != LSM_RET_DEFAULT(sb_set_mnt_opts)) break; } return rc; } EXPORT_SYMBOL(security_sb_set_mnt_opts); /** * security_sb_clone_mnt_opts() - Duplicate superblock mount options * @oldsb: source superblock * @newsb: destination superblock * @kern_flags: kernel flags (in) * @set_kern_flags: kernel flags (out) * * Copy all security options from a given superblock to another. * * Return: Returns 0 on success, error on failure. */ int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags) { return call_int_hook(sb_clone_mnt_opts, oldsb, newsb, kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); /** * security_move_mount() - Check permissions for moving a mount * @from_path: source mount point * @to_path: destination mount point * * Check permission before a mount is moved. * * Return: Returns 0 if permission is granted. */ int security_move_mount(const struct path *from_path, const struct path *to_path) { return call_int_hook(move_mount, from_path, to_path); } /** * security_path_notify() - Check if setting a watch is allowed * @path: file path * @mask: event mask * @obj_type: file path type * * Check permissions before setting a watch on events as defined by @mask, on * an object at @path, whose type is defined by @obj_type. * * Return: Returns 0 if permission is granted. */ int security_path_notify(const struct path *path, u64 mask, unsigned int obj_type) { return call_int_hook(path_notify, path, mask, obj_type); } /** * security_inode_alloc() - Allocate an inode LSM blob * @inode: the inode * @gfp: allocation flags * * Allocate and attach a security structure to @inode->i_security. The * i_security field is initialized to NULL when the inode structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_inode_alloc(struct inode *inode, gfp_t gfp) { int rc = lsm_inode_alloc(inode, gfp); if (unlikely(rc)) return rc; rc = call_int_hook(inode_alloc_security, inode); if (unlikely(rc)) security_inode_free(inode); return rc; } static void inode_free_by_rcu(struct rcu_head *head) { /* The rcu head is at the start of the inode blob */ call_void_hook(inode_free_security_rcu, head); kmem_cache_free(lsm_inode_cache, head); } /** * security_inode_free() - Free an inode's LSM blob * @inode: the inode * * Release any LSM resources associated with @inode, although due to the * inode's RCU protections it is possible that the resources will not be * fully released until after the current RCU grace period has elapsed. * * It is important for LSMs to note that despite being present in a call to * security_inode_free(), @inode may still be referenced in a VFS path walk * and calls to security_inode_permission() may be made during, or after, * a call to security_inode_free(). For this reason the inode->i_security * field is released via a call_rcu() callback and any LSMs which need to * retain inode state for use in security_inode_permission() should only * release that state in the inode_free_security_rcu() LSM hook callback. 
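 *
 * A common module-side pattern (hypothetical "foo" names, not part of this
 * file) is to keep a trivial accessor into the shared inode blob and to
 * defer any teardown that late security_inode_permission() calls may still
 * depend on to the inode_free_security_rcu hook:
 *
 *	static inline struct foo_inode_sec *foo_inode(const struct inode *inode)
 *	{
 *		return inode->i_security + foo_blob_sizes.lbs_inode;
 *	}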
*/ void security_inode_free(struct inode *inode) { call_void_hook(inode_free_security, inode); if (!inode->i_security) return; call_rcu((struct rcu_head *)inode->i_security, inode_free_by_rcu); } /** * security_dentry_init_security() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @xattr_name: name of the security/LSM xattr * @lsmctx: pointer to the resulting LSM context * * Compute a context for a dentry as the inode is not yet available since NFSv4 * has no label backed by an EA anyway. It is important to note that * @xattr_name does not need to be free'd by the caller, it is a static string. * * Return: Returns 0 on success, negative values on failure. */ int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, struct lsm_context *lsmctx) { return call_int_hook(dentry_init_security, dentry, mode, name, xattr_name, lsmctx); } EXPORT_SYMBOL(security_dentry_init_security); /** * security_dentry_create_files_as() - Perform dentry initialization * @dentry: the dentry to initialize * @mode: mode used to determine resource type * @name: name of the last path component * @old: creds to use for LSM context calculations * @new: creds to modify * * Compute a context for a dentry as the inode is not yet available and set * that context in passed in creds so that new files are created using that * context. Context is calculated using the passed in creds and not the creds * of the caller. * * Return: Returns 0 on success, error on failure. */ int security_dentry_create_files_as(struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct cred *new) { return call_int_hook(dentry_create_files_as, dentry, mode, name, old, new); } EXPORT_SYMBOL(security_dentry_create_files_as); /** * security_inode_init_security() - Initialize an inode's LSM context * @inode: the inode * @dir: parent directory * @qstr: last component of the pathname * @initxattrs: callback function to write xattrs * @fs_data: filesystem specific data * * Obtain the security attribute name suffix and value to set on a newly * created inode and set up the incore security field for the new inode. This * hook is called by the fs code as part of the inode creation transaction and * provides for atomic labeling of the inode, unlike the post_create/mkdir/... * hooks called by the VFS. * * The hook function is expected to populate the xattrs array, by calling * lsm_get_xattr_slot() to retrieve the slots reserved by the security module * with the lbs_xattr_count field of the lsm_blob_sizes structure. For each * slot, the hook function should set ->name to the attribute name suffix * (e.g. selinux), to allocate ->value (will be freed by the caller) and set it * to the attribute value, to set ->value_len to the length of the value. If * the security module does not use security attributes or does not wish to put * a security attribute on this particular inode, then it should return * -EOPNOTSUPP to skip this processing. * * Return: Returns 0 if the LSM successfully initialized all of the inode * security attributes that are required, negative values otherwise. 
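 *
 * A minimal sketch of the xattr-slot contract described above (the "foo"
 * names and label are hypothetical, not part of this file):
 *
 *	static int foo_inode_init_security(struct inode *inode, struct inode *dir,
 *					   const struct qstr *qstr,
 *					   struct xattr *xattrs, int *xattr_count)
 *	{
 *		struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);
 *
 *		if (xattr) {
 *			xattr->name = "foo";	/* suffix, i.e. security.foo */
 *			xattr->value = kstrdup("foo_default_label", GFP_NOFS);
 *			if (!xattr->value)
 *				return -ENOMEM;
 *			xattr->value_len = strlen(xattr->value) + 1;
 *		}
 *		return 0;
 *	}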
*/ int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, const initxattrs initxattrs, void *fs_data) { struct lsm_static_call *scall; struct xattr *new_xattrs = NULL; int ret = -EOPNOTSUPP, xattr_count = 0; if (unlikely(IS_PRIVATE(inode))) return 0; if (!blob_sizes.lbs_xattr_count) return 0; if (initxattrs) { /* Allocate +1 as terminator. */ new_xattrs = kcalloc(blob_sizes.lbs_xattr_count + 1, sizeof(*new_xattrs), GFP_NOFS); if (!new_xattrs) return -ENOMEM; } lsm_for_each_hook(scall, inode_init_security) { ret = scall->hl->hook.inode_init_security(inode, dir, qstr, new_xattrs, &xattr_count); if (ret && ret != -EOPNOTSUPP) goto out; /* * As documented in lsm_hooks.h, -EOPNOTSUPP in this context * means that the LSM is not willing to provide an xattr, not * that it wants to signal an error. Thus, continue to invoke * the remaining LSMs. */ } /* If initxattrs() is NULL, xattr_count is zero, skip the call. */ if (!xattr_count) goto out; ret = initxattrs(inode, new_xattrs, fs_data); out: for (; xattr_count > 0; xattr_count--) kfree(new_xattrs[xattr_count - 1].value); kfree(new_xattrs); return (ret == -EOPNOTSUPP) ? 0 : ret; } EXPORT_SYMBOL(security_inode_init_security); /** * security_inode_init_security_anon() - Initialize an anonymous inode * @inode: the inode * @name: the anonymous inode class * @context_inode: an optional related inode * * Set up the incore security field for the new anonymous inode and return * whether the inode creation is permitted by the security module or not. * * Return: Returns 0 on success, -EACCES if the security module denies the * creation of this inode, or another -errno upon other errors. */ int security_inode_init_security_anon(struct inode *inode, const struct qstr *name, const struct inode *context_inode) { return call_int_hook(inode_init_security_anon, inode, name, context_inode); } #ifdef CONFIG_SECURITY_PATH /** * security_path_mknod() - Check if creating a special file is allowed * @dir: parent directory * @dentry: new file * @mode: new file mode * @dev: device number * * Check permissions when creating a file. Note that this hook is called even * if mknod operation is being done for a regular file. * * Return: Returns 0 if permission is granted. */ int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mknod, dir, dentry, mode, dev); } EXPORT_SYMBOL(security_path_mknod); /** * security_path_post_mknod() - Update inode security after reg file creation * @idmap: idmap of the mount * @dentry: new file * * Update inode security field after a regular file has been created. */ void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(path_post_mknod, idmap, dentry); } /** * security_path_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode * * Check permissions to create a new directory in the existing directory. * * Return: Returns 0 if permission is granted. 
*/ int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_mkdir, dir, dentry, mode); } EXPORT_SYMBOL(security_path_mkdir); /** * security_path_rmdir() - Check if removing a directory is allowed * @dir: parent directory * @dentry: directory to remove * * Check the permission to remove a directory. * * Return: Returns 0 if permission is granted. */ int security_path_rmdir(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_rmdir, dir, dentry); } /** * security_path_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_unlink(const struct path *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_unlink, dir, dentry); } EXPORT_SYMBOL(security_path_unlink); /** * security_path_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: file pathname * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_symlink(const struct path *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dir->dentry)))) return 0; return call_int_hook(path_symlink, dir, dentry, old_name); } /** * security_path_link - Check if creating a hard link is allowed * @old_dentry: existing file * @new_dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(path_link, old_dentry, new_dir, new_dentry); } /** * security_path_rename() - Check if renaming a file is allowed * @old_dir: parent directory of the old file * @old_dentry: the old file * @new_dir: parent directory of the new file * @new_dentry: the new file * @flags: flags * * Check for permission to rename a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_rename(const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) || (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry))))) return 0; return call_int_hook(path_rename, old_dir, old_dentry, new_dir, new_dentry, flags); } EXPORT_SYMBOL(security_path_rename); /** * security_path_truncate() - Check if truncating a file is allowed * @path: file * * Check permission before truncating the file indicated by path. Note that * truncation permissions may also be checked based on already opened files, * using the security_file_truncate() hook. * * Return: Returns 0 if permission is granted. */ int security_path_truncate(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_truncate, path); } /** * security_path_chmod() - Check if changing the file's mode is allowed * @path: file * @mode: new mode * * Check for permission to change a mode of the file @path. 
The new mode is * specified in @mode which is a bitmask of constants from * <include/uapi/linux/stat.h>. * * Return: Returns 0 if permission is granted. */ int security_path_chmod(const struct path *path, umode_t mode) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chmod, path, mode); } /** * security_path_chown() - Check if changing the file's owner/group is allowed * @path: file * @uid: file owner * @gid: file group * * Check for permission to change owner/group of a file or directory. * * Return: Returns 0 if permission is granted. */ int security_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(path_chown, path, uid, gid); } /** * security_path_chroot() - Check if changing the root directory is allowed * @path: directory * * Check for permission to change root directory. * * Return: Returns 0 if permission is granted. */ int security_path_chroot(const struct path *path) { return call_int_hook(path_chroot, path); } #endif /* CONFIG_SECURITY_PATH */ /** * security_inode_create() - Check if creating a file is allowed * @dir: the parent directory * @dentry: the file being created * @mode: requested file mode * * Check permission to create a regular file. * * Return: Returns 0 if permission is granted. */ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_create, dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_create); /** * security_inode_post_create_tmpfile() - Update inode security of new tmpfile * @idmap: idmap of the mount * @inode: inode of the new tmpfile * * Update inode security data after a tmpfile has been created. */ void security_inode_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode) { if (unlikely(IS_PRIVATE(inode))) return; call_void_hook(inode_post_create_tmpfile, idmap, inode); } /** * security_inode_link() - Check if creating a hard link is allowed * @old_dentry: existing file * @dir: new parent directory * @new_dentry: new link * * Check permission before creating a new hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)))) return 0; return call_int_hook(inode_link, old_dentry, dir, new_dentry); } /** * security_inode_unlink() - Check if removing a hard link is allowed * @dir: parent directory * @dentry: file * * Check the permission to remove a hard link to a file. * * Return: Returns 0 if permission is granted. */ int security_inode_unlink(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_unlink, dir, dentry); } /** * security_inode_symlink() - Check if creating a symbolic link is allowed * @dir: parent directory * @dentry: symbolic link * @old_name: existing filename * * Check the permission to create a symbolic link to a file. * * Return: Returns 0 if permission is granted. 
*/ int security_inode_symlink(struct inode *dir, struct dentry *dentry, const char *old_name) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_symlink, dir, dentry, old_name); } /** * security_inode_mkdir() - Check if creating a new directory is allowed * @dir: parent directory * @dentry: new directory * @mode: new directory mode * * Check permissions to create a new directory in the existing directory * associated with inode structure @dir. * * Return: Returns 0 if permission is granted. */ int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_mkdir, dir, dentry, mode); } EXPORT_SYMBOL_GPL(security_inode_mkdir); /** * security_inode_rmdir() - Check if removing a directory is allowed * @dir: parent directory * @dentry: directory to be removed * * Check the permission to remove a directory. * * Return: Returns 0 if permission is granted. */ int security_inode_rmdir(struct inode *dir, struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_rmdir, dir, dentry); } /** * security_inode_mknod() - Check if creating a special file is allowed * @dir: parent directory * @dentry: new file * @mode: new file mode * @dev: device number * * Check permissions when creating a special file (or a socket or a fifo file * created via the mknod system call). Note that if mknod operation is being * done for a regular file, then the create hook will be called and not this * hook. * * Return: Returns 0 if permission is granted. */ int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { if (unlikely(IS_PRIVATE(dir))) return 0; return call_int_hook(inode_mknod, dir, dentry, mode, dev); } /** * security_inode_rename() - Check if renaming a file is allowed * @old_dir: parent directory of the old file * @old_dentry: the old file * @new_dir: parent directory of the new file * @new_dentry: the new file * @flags: flags * * Check for permission to rename a file or directory. * * Return: Returns 0 if permission is granted. */ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(old_dentry)) || (d_is_positive(new_dentry) && IS_PRIVATE(d_backing_inode(new_dentry))))) return 0; if (flags & RENAME_EXCHANGE) { int err = call_int_hook(inode_rename, new_dir, new_dentry, old_dir, old_dentry); if (err) return err; } return call_int_hook(inode_rename, old_dir, old_dentry, new_dir, new_dentry); } /** * security_inode_readlink() - Check if reading a symbolic link is allowed * @dentry: link * * Check the permission to read the symbolic link. * * Return: Returns 0 if permission is granted. */ int security_inode_readlink(struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_readlink, dentry); } /** * security_inode_follow_link() - Check if following a symbolic link is allowed * @dentry: link dentry * @inode: link inode * @rcu: true if in RCU-walk mode * * Check permission to follow a symbolic link when looking up a pathname. If * @rcu is true, @inode is not stable. * * Return: Returns 0 if permission is granted. 
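 *
 * A module-side sketch of the RCU-walk convention (hypothetical "foo"
 * names, not part of this file): a hook that cannot complete its checks
 * without sleeping while @rcu is true is expected to return -ECHILD so the
 * VFS retries the lookup in ref-walk mode:
 *
 *	static int foo_inode_follow_link(struct dentry *dentry,
 *					 struct inode *inode, bool rcu)
 *	{
 *		if (rcu && !foo_state_cached(inode))
 *			return -ECHILD;
 *		return foo_may_follow(dentry, inode);
 *	}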
*/ int security_inode_follow_link(struct dentry *dentry, struct inode *inode, bool rcu) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_follow_link, dentry, inode, rcu); } /** * security_inode_permission() - Check if accessing an inode is allowed * @inode: inode * @mask: access mask * * Check permission before accessing an inode. This hook is called by the * existing Linux permission function, so a security module can use it to * provide additional checking for existing Linux permission checks. Notice * that this hook is called when a file is opened (as well as many other * operations), whereas the file_security_ops permission hook is called when * the actual read/write operations are performed. * * Return: Returns 0 if permission is granted. */ int security_inode_permission(struct inode *inode, int mask) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_permission, inode, mask); } /** * security_inode_setattr() - Check if setting file attributes is allowed * @idmap: idmap of the mount * @dentry: file * @attr: new attributes * * Check permission before setting file attributes. Note that the kernel call * to notify_change is performed from several locations, whenever file * attributes change (such as when a file is truncated, chown/chmod operations, * transferring disk quotas, etc). * * Return: Returns 0 if permission is granted. */ int security_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_setattr, idmap, dentry, attr); } EXPORT_SYMBOL_GPL(security_inode_setattr); /** * security_inode_post_setattr() - Update the inode after a setattr operation * @idmap: idmap of the mount * @dentry: file * @ia_valid: file attributes set * * Update inode security field after successful setting file attributes. */ void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_setattr, idmap, dentry, ia_valid); } /** * security_inode_getattr() - Check if getting file attributes is allowed * @path: file * * Check permission before obtaining file attributes. * * Return: Returns 0 if permission is granted. */ int security_inode_getattr(const struct path *path) { if (unlikely(IS_PRIVATE(d_backing_inode(path->dentry)))) return 0; return call_int_hook(inode_getattr, path); } /** * security_inode_setxattr() - Check if setting file xattrs is allowed * @idmap: idmap of the mount * @dentry: file * @name: xattr name * @value: xattr value * @size: size of xattr value * @flags: flags * * This hook performs the desired permission checks before setting the extended * attributes (xattrs) on @dentry. It is important to note that we have some * additional logic before the main LSM implementation calls to detect if we * need to perform an additional capability check at the LSM layer. * * Normally we enforce a capability check prior to executing the various LSM * hook implementations, but if a LSM wants to avoid this capability check, * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for * xattrs that it wants to avoid the capability check, leaving the LSM fully * responsible for enforcing the access control for the specific xattr. If all * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, * or return a 0 (the default return value), the capability check is still * performed. 
If no 'inode_xattr_skipcap' hooks are registered the capability * check is performed. * * Return: Returns 0 if permission is granted. */ int security_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; /* enforce the capability checks at the lsm layer, if needed */ if (!call_int_hook(inode_xattr_skipcap, name)) { rc = cap_inode_setxattr(dentry, name, value, size, flags); if (rc) return rc; } return call_int_hook(inode_setxattr, idmap, dentry, name, value, size, flags); } /** * security_inode_set_acl() - Check if setting posix acls is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * @kacl: acl struct * * Check permission before setting posix acls; the posix acls in @kacl are * identified by @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_set_acl, idmap, dentry, acl_name, kacl); } /** * security_inode_post_set_acl() - Update inode security from posix acls set * @dentry: file * @acl_name: acl name * @kacl: acl struct * * Update inode security data after successfully setting posix acls on @dentry. * The posix acls in @kacl are identified by @acl_name. */ void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_set_acl, dentry, acl_name, kacl); } /** * security_inode_get_acl() - Check if reading posix acls is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Check permission before getting posix acls; the posix acls are identified by * @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_get_acl, idmap, dentry, acl_name); } /** * security_inode_remove_acl() - Check if removing a posix acl is allowed * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Check permission before removing posix acls; the posix acls are identified * by @acl_name. * * Return: Returns 0 if permission is granted. */ int security_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_remove_acl, idmap, dentry, acl_name); } /** * security_inode_post_remove_acl() - Update inode security after rm posix acls * @idmap: idmap of the mount * @dentry: file * @acl_name: acl name * * Update inode security data after successfully removing posix acls on * @dentry in @idmap. The posix acls are identified by @acl_name. */ void security_inode_post_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_remove_acl, idmap, dentry, acl_name); } /** * security_inode_post_setxattr() - Update the inode after a setxattr operation * @dentry: file * @name: xattr name * @value: xattr value * @size: xattr value size * @flags: flags * * Update inode security field after successful setxattr operation.
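 *
 * For the 'inode_xattr_skipcap' mechanism described above for setxattr (and
 * below for removexattr), a minimal hypothetical implementation could look
 * like the sketch below; returning 1 tells this layer that the LSM takes
 * full responsibility for the capability checks on its own xattr (the
 * "security.example" name is a placeholder, not a real xattr):
 *
 *	static int example_inode_xattr_skipcap(const char *name)
 *	{
 *		// skip cap_inode_setxattr()/cap_inode_removexattr() only for
 *		// the xattr this LSM mediates entirely by itself
 *		return !strcmp(name, "security.example");
 *	}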
*/ void security_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_setxattr, dentry, name, value, size, flags); } /** * security_inode_getxattr() - Check if xattr access is allowed * @dentry: file * @name: xattr name * * Check permission before obtaining the extended attributes identified by * @name for @dentry. * * Return: Returns 0 if permission is granted. */ int security_inode_getxattr(struct dentry *dentry, const char *name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_getxattr, dentry, name); } /** * security_inode_listxattr() - Check if listing xattrs is allowed * @dentry: file * * Check permission before obtaining the list of extended attribute names for * @dentry. * * Return: Returns 0 if permission is granted. */ int security_inode_listxattr(struct dentry *dentry) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; return call_int_hook(inode_listxattr, dentry); } /** * security_inode_removexattr() - Check if removing an xattr is allowed * @idmap: idmap of the mount * @dentry: file * @name: xattr name * * This hook performs the desired permission checks before setting the extended * attributes (xattrs) on @dentry. It is important to note that we have some * additional logic before the main LSM implementation calls to detect if we * need to perform an additional capability check at the LSM layer. * * Normally we enforce a capability check prior to executing the various LSM * hook implementations, but if a LSM wants to avoid this capability check, * it can register a 'inode_xattr_skipcap' hook and return a value of 1 for * xattrs that it wants to avoid the capability check, leaving the LSM fully * responsible for enforcing the access control for the specific xattr. If all * of the enabled LSMs refrain from registering a 'inode_xattr_skipcap' hook, * or return a 0 (the default return value), the capability check is still * performed. If no 'inode_xattr_skipcap' hooks are registered the capability * check is performed. * * Return: Returns 0 if permission is granted. */ int security_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name) { int rc; if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; /* enforce the capability checks at the lsm layer, if needed */ if (!call_int_hook(inode_xattr_skipcap, name)) { rc = cap_inode_removexattr(idmap, dentry, name); if (rc) return rc; } return call_int_hook(inode_removexattr, idmap, dentry, name); } /** * security_inode_post_removexattr() - Update the inode after a removexattr op * @dentry: file * @name: xattr name * * Update the inode after a successful removexattr operation. */ void security_inode_post_removexattr(struct dentry *dentry, const char *name) { if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return; call_void_hook(inode_post_removexattr, dentry, name); } /** * security_inode_file_setattr() - check if setting fsxattr is allowed * @dentry: file to set filesystem extended attributes on * @fa: extended attributes to set on the inode * * Called when file_setattr() syscall or FS_IOC_FSSETXATTR ioctl() is called on * inode * * Return: Returns 0 if permission is granted. 
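 *
 * For orientation, the FS_IOC_FSSETXATTR path mentioned above is reached
 * from ordinary userspace code such as the sketch below (illustrative only,
 * error handling omitted; @fd is assumed to be an open file descriptor):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct fsxattr fa;
 *
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fa);	// -> inode_file_getattr hook
 *	fa.fsx_xflags |= FS_XFLAG_APPEND;	// e.g. request append-only
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fa);	// -> this inode_file_setattr hook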
*/ int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa) { return call_int_hook(inode_file_setattr, dentry, fa); } /** * security_inode_file_getattr() - check if retrieving fsxattr is allowed * @dentry: file to retrieve filesystem extended attributes from * @fa: extended attributes to get * * Called when file_getattr() syscall or FS_IOC_FSGETXATTR ioctl() is called on * inode * * Return: Returns 0 if permission is granted. */ int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa) { return call_int_hook(inode_file_getattr, dentry, fa); } /** * security_inode_need_killpriv() - Check if security_inode_killpriv() required * @dentry: associated dentry * * Called when an inode has been changed to determine if * security_inode_killpriv() should be called. * * Return: Return <0 on error to abort the inode change operation, return 0 if * security_inode_killpriv() does not need to be called, return >0 if * security_inode_killpriv() does need to be called. */ int security_inode_need_killpriv(struct dentry *dentry) { return call_int_hook(inode_need_killpriv, dentry); } /** * security_inode_killpriv() - The setuid bit is removed, update LSM state * @idmap: idmap of the mount * @dentry: associated dentry * * The @dentry's setuid bit is being removed. Remove similar security labels. * Called with the dentry->d_inode->i_mutex held. * * Return: Return 0 on success. If error is returned, then the operation * causing setuid bit removal is failed. */ int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry) { return call_int_hook(inode_killpriv, idmap, dentry); } /** * security_inode_getsecurity() - Get the xattr security label of an inode * @idmap: idmap of the mount * @inode: inode * @name: xattr name * @buffer: security label buffer * @alloc: allocation flag * * Retrieve a copy of the extended attribute representation of the security * label associated with @name for @inode via @buffer. Note that @name is the * remainder of the attribute name after the security prefix has been removed. * @alloc is used to specify if the call should return a value via the buffer * or just the value length. * * Return: Returns size of buffer on success. */ int security_inode_getsecurity(struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_getsecurity); return call_int_hook(inode_getsecurity, idmap, inode, name, buffer, alloc); } /** * security_inode_setsecurity() - Set the xattr security label of an inode * @inode: inode * @name: xattr name * @value: security label * @size: length of security label * @flags: flags * * Set the security label associated with @name for @inode from the extended * attribute value @value. @size indicates the size of the @value in bytes. * @flags may be XATTR_CREATE, XATTR_REPLACE, or 0. Note that @name is the * remainder of the attribute name after the security. prefix has been removed. * * Return: Returns 0 on success. */ int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { if (unlikely(IS_PRIVATE(inode))) return LSM_RET_DEFAULT(inode_setsecurity); return call_int_hook(inode_setsecurity, inode, name, value, size, flags); } /** * security_inode_listsecurity() - List the xattr security label names * @inode: inode * @buffer: buffer * @buffer_size: size of buffer * * Copy the extended attribute names for the security labels associated with * @inode into @buffer. 
The maximum size of @buffer is specified by * @buffer_size. @buffer may be NULL to request the size of the buffer * required. * * Return: Returns number of bytes used/required on success. */ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { if (unlikely(IS_PRIVATE(inode))) return 0; return call_int_hook(inode_listsecurity, inode, buffer, buffer_size); } EXPORT_SYMBOL(security_inode_listsecurity); /** * security_inode_getlsmprop() - Get an inode's LSM data * @inode: inode * @prop: lsm specific information to return * * Get the lsm specific information associated with the node. */ void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop) { call_void_hook(inode_getlsmprop, inode, prop); } /** * security_inode_copy_up() - Create new creds for an overlayfs copy-up op * @src: union dentry of copy-up file * @new: newly created creds * * A file is about to be copied up from lower layer to upper layer of overlay * filesystem. Security module can prepare a set of new creds and modify as * need be and return new creds. Caller will switch to new creds temporarily to * create new file and release newly allocated creds. * * Return: Returns 0 on success or a negative error code on error. */ int security_inode_copy_up(struct dentry *src, struct cred **new) { return call_int_hook(inode_copy_up, src, new); } EXPORT_SYMBOL(security_inode_copy_up); /** * security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op * @src: union dentry of copy-up file * @name: xattr name * * Filter the xattrs being copied up when a unioned file is copied up from a * lower layer to the union/overlay layer. The caller is responsible for * reading and writing the xattrs, this hook is merely a filter. * * Return: Returns 0 to accept the xattr, -ECANCELED to discard the xattr, * -EOPNOTSUPP if the security module does not know about attribute, * or a negative error code to abort the copy up. */ int security_inode_copy_up_xattr(struct dentry *src, const char *name) { int rc; rc = call_int_hook(inode_copy_up_xattr, src, name); if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr)) return rc; return LSM_RET_DEFAULT(inode_copy_up_xattr); } EXPORT_SYMBOL(security_inode_copy_up_xattr); /** * security_inode_setintegrity() - Set the inode's integrity data * @inode: inode * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a inode with LSMs. * LSMs should free the previously saved data if @value is NULL. * * Return: Returns 0 on success, negative values on failure. */ int security_inode_setintegrity(const struct inode *inode, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(inode_setintegrity, inode, type, value, size); } EXPORT_SYMBOL(security_inode_setintegrity); /** * security_kernfs_init_security() - Init LSM context for a kernfs node * @kn_dir: parent kernfs node * @kn: the kernfs node to initialize * * Initialize the security context of a newly created kernfs node based on its * own and its parent's attributes. * * Return: Returns 0 if permission is granted. */ int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { return call_int_hook(kernfs_init_security, kn_dir, kn); } /** * security_file_permission() - Check file permissions * @file: file * @mask: requested permissions * * Check file permissions before accessing an open file. 
This hook is called * by various operations that read or write files. A security module can use * this hook to perform additional checking on these operations, e.g. to * revalidate permissions on use to support privilege bracketing or policy * changes. Notice that this hook is used when the actual read/write * operations are performed, whereas the inode_security_ops hook is called when * a file is opened (as well as many other operations). Although this hook can * be used to revalidate permissions for various system call operations that * read or write files, it does not address the revalidation of permissions for * memory-mapped files. Security modules must handle this separately if they * need such revalidation. * * Return: Returns 0 if permission is granted. */ int security_file_permission(struct file *file, int mask) { return call_int_hook(file_permission, file, mask); } /** * security_file_alloc() - Allocate and init a file's LSM blob * @file: the file * * Allocate and attach a security structure to the file->f_security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if the hook is successful and permission is granted. */ int security_file_alloc(struct file *file) { int rc = lsm_file_alloc(file); if (rc) return rc; rc = call_int_hook(file_alloc_security, file); if (unlikely(rc)) security_file_free(file); return rc; } /** * security_file_release() - Perform actions before releasing the file ref * @file: the file * * Perform actions before releasing the last reference to a file. */ void security_file_release(struct file *file) { call_void_hook(file_release, file); } /** * security_file_free() - Free a file's LSM blob * @file: the file * * Deallocate and free any security structures stored in file->f_security. */ void security_file_free(struct file *file) { void *blob; call_void_hook(file_free_security, file); blob = file->f_security; if (blob) { file->f_security = NULL; kmem_cache_free(lsm_file_cache, blob); } } /** * security_file_ioctl() - Check if an ioctl is allowed * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Check permission for an ioctl operation on @file. Note that @arg sometimes * represents a user space pointer; in other cases, it may be a simple integer * value. When @arg represents a user space pointer, it should never be used * by the security module. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl); /** * security_file_ioctl_compat() - Check if an ioctl is allowed in compat mode * @file: associated file * @cmd: ioctl cmd * @arg: ioctl arguments * * Compat version of security_file_ioctl() that correctly handles 32-bit * processes running on 64-bit kernels. * * Return: Returns 0 if permission is granted. */ int security_file_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_ioctl_compat, file, cmd, arg); } EXPORT_SYMBOL_GPL(security_file_ioctl_compat); static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { /* * Does we have PROT_READ and does the application expect * it to imply PROT_EXEC? If not, nothing to talk about... */ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) return prot; if (!(current->personality & READ_IMPLIES_EXEC)) return prot; /* * if that's an anonymous mapping, let it. 
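 * (READ_IMPLIES_EXEC is the legacy personality flag under which userspace
 * expects PROT_READ mappings to be executable as well.)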
*/ if (!file) return prot | PROT_EXEC; /* * ditto if it's not on noexec mount, except that on !MMU we need * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case */ if (!path_noexec(&file->f_path)) { #ifndef CONFIG_MMU if (file->f_op->mmap_capabilities) { unsigned caps = file->f_op->mmap_capabilities(file); if (!(caps & NOMMU_MAP_EXEC)) return prot; } #endif return prot | PROT_EXEC; } /* anything on noexec mount won't get PROT_EXEC */ return prot; } /** * security_mmap_file() - Check if mmap'ing a file is allowed * @file: file * @prot: protection applied by the kernel * @flags: flags * * Check permissions for a mmap operation. The @file may be NULL, e.g. if * mapping anonymous memory. * * Return: Returns 0 if permission is granted. */ int security_mmap_file(struct file *file, unsigned long prot, unsigned long flags) { return call_int_hook(mmap_file, file, prot, mmap_prot(file, prot), flags); } /** * security_mmap_addr() - Check if mmap'ing an address is allowed * @addr: address * * Check permissions for a mmap operation at @addr. * * Return: Returns 0 if permission is granted. */ int security_mmap_addr(unsigned long addr) { return call_int_hook(mmap_addr, addr); } /** * security_file_mprotect() - Check if changing memory protections is allowed * @vma: memory region * @reqprot: application requested protection * @prot: protection applied by the kernel * * Check permissions before changing memory access permissions. * * Return: Returns 0 if permission is granted. */ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return call_int_hook(file_mprotect, vma, reqprot, prot); } /** * security_file_lock() - Check if a file lock is allowed * @file: file * @cmd: lock operation (e.g. F_RDLCK, F_WRLCK) * * Check permission before performing file locking operations. Note the hook * mediates both flock and fcntl style locks. * * Return: Returns 0 if permission is granted. */ int security_file_lock(struct file *file, unsigned int cmd) { return call_int_hook(file_lock, file, cmd); } /** * security_file_fcntl() - Check if fcntl() op is allowed * @file: file * @cmd: fcntl command * @arg: command argument * * Check permission before allowing the file operation specified by @cmd from * being performed on the file @file. Note that @arg sometimes represents a * user space pointer; in other cases, it may be a simple integer value. When * @arg represents a user space pointer, it should never be used by the * security module. * * Return: Returns 0 if permission is granted. */ int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { return call_int_hook(file_fcntl, file, cmd, arg); } /** * security_file_set_fowner() - Set the file owner info in the LSM blob * @file: the file * * Save owner security information (typically from current->security) in * file->f_security for later use by the send_sigiotask hook. * * This hook is called with file->f_owner.lock held. * * Return: Returns 0 on success. */ void security_file_set_fowner(struct file *file) { call_void_hook(file_set_fowner, file); } /** * security_file_send_sigiotask() - Check if sending SIGIO/SIGURG is allowed * @tsk: target task * @fown: signal sender * @sig: signal to be sent, SIGIO is sent if 0 * * Check permission for the file owner @fown to send SIGIO or SIGURG to the * process @tsk. Note that this hook is sometimes called from interrupt. 
Note * that the fown_struct, @fown, is never outside the context of a struct file, * so the file structure (and associated security information) can always be * obtained: container_of(fown, struct file, f_owner). * * Return: Returns 0 if permission is granted. */ int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig) { return call_int_hook(file_send_sigiotask, tsk, fown, sig); } /** * security_file_receive() - Check if receiving a file via IPC is allowed * @file: file being received * * This hook allows security modules to control the ability of a process to * receive an open file descriptor via socket IPC. * * Return: Returns 0 if permission is granted. */ int security_file_receive(struct file *file) { return call_int_hook(file_receive, file); } /** * security_file_open() - Save open() time state for later use by the LSM * @file: the file * * Save open-time permission checking state for later use upon file_permission, * and recheck access if anything has changed since inode_permission. * * We can check if a file is opened for execution (e.g. execve(2) call), either * directly or indirectly (e.g. ELF's ld.so) by checking file->f_flags & * __FMODE_EXEC. * * Return: Returns 0 if permission is granted. */ int security_file_open(struct file *file) { return call_int_hook(file_open, file); } /** * security_file_post_open() - Evaluate a file after it has been opened * @file: the file * @mask: access mask * * Evaluate an opened file and the access mask requested with open(). The hook * is useful for LSMs that require the file content to be available in order to * make decisions. * * Return: Returns 0 if permission is granted. */ int security_file_post_open(struct file *file, int mask) { return call_int_hook(file_post_open, file, mask); } EXPORT_SYMBOL_GPL(security_file_post_open); /** * security_file_truncate() - Check if truncating a file is allowed * @file: file * * Check permission before truncating a file, i.e. using ftruncate. Note that * truncation permission may also be checked based on the path, using the * @path_truncate hook. * * Return: Returns 0 if permission is granted. */ int security_file_truncate(struct file *file) { return call_int_hook(file_truncate, file); } /** * security_task_alloc() - Allocate a task's LSM blob * @task: the task * @clone_flags: flags indicating what is being shared * * Handle allocation of task-related resources. * * Return: Returns 0 on success, negative values on failure. */ int security_task_alloc(struct task_struct *task, unsigned long clone_flags) { int rc = lsm_task_alloc(task); if (rc) return rc; rc = call_int_hook(task_alloc, task, clone_flags); if (unlikely(rc)) security_task_free(task); return rc; } /** * security_task_free() - Free a task's LSM blob and related resources * @task: task * * Handle release of task-related resources. Note that this can be called from * interrupt context. */ void security_task_free(struct task_struct *task) { call_void_hook(task_free, task); kfree(task->security); task->security = NULL; } /** * security_cred_alloc_blank() - Allocate the min memory to allow cred_transfer * @cred: credentials * @gfp: gfp flags * * Only allocate sufficient memory and attach to @cred such that * cred_transfer() will not get ENOMEM. * * Return: Returns 0 on success, negative values on failure.
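 *
 * The contract above is easiest to honour by keeping all per-cred state in
 * the blob that lsm_cred_alloc() has already sized for the LSM, so that the
 * later cred_transfer() step is a plain copy and cannot fail. A hypothetical
 * sketch (example_cred() is an assumed accessor returning this LSM's slice
 * of @cred->security):
 *
 *	static int example_cred_alloc_blank(struct cred *cred, gfp_t gfp)
 *	{
 *		return 0;	// blob is preallocated; nothing left to fail
 *	}
 *
 *	static void example_cred_transfer(struct cred *new,
 *					  const struct cred *old)
 *	{
 *		*example_cred(new) = *example_cred(old);	// never -ENOMEM
 *	}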
*/ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { int rc = lsm_cred_alloc(cred, gfp); if (rc) return rc; rc = call_int_hook(cred_alloc_blank, cred, gfp); if (unlikely(rc)) security_cred_free(cred); return rc; } /** * security_cred_free() - Free the cred's LSM blob and associated resources * @cred: credentials * * Deallocate and clear the cred->security field in a set of credentials. */ void security_cred_free(struct cred *cred) { /* * There is a failure case in prepare_creds() that * may result in a call here with ->security being NULL. */ if (unlikely(cred->security == NULL)) return; call_void_hook(cred_free, cred); kfree(cred->security); cred->security = NULL; } /** * security_prepare_creds() - Prepare a new set of credentials * @new: new credentials * @old: original credentials * @gfp: gfp flags * * Prepare a new set of credentials by copying the data from the old set. * * Return: Returns 0 on success, negative values on failure. */ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { int rc = lsm_cred_alloc(new, gfp); if (rc) return rc; rc = call_int_hook(cred_prepare, new, old, gfp); if (unlikely(rc)) security_cred_free(new); return rc; } /** * security_transfer_creds() - Transfer creds * @new: target credentials * @old: original credentials * * Transfer data from original creds to new creds. */ void security_transfer_creds(struct cred *new, const struct cred *old) { call_void_hook(cred_transfer, new, old); } /** * security_cred_getsecid() - Get the secid from a set of credentials * @c: credentials * @secid: secid value * * Retrieve the security identifier of the cred structure @c. In case of * failure, @secid will be set to zero. */ void security_cred_getsecid(const struct cred *c, u32 *secid) { *secid = 0; call_void_hook(cred_getsecid, c, secid); } EXPORT_SYMBOL(security_cred_getsecid); /** * security_cred_getlsmprop() - Get the LSM data from a set of credentials * @c: credentials * @prop: destination for the LSM data * * Retrieve the security data of the cred structure @c. In case of * failure, @prop will be cleared. */ void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(cred_getlsmprop, c, prop); } EXPORT_SYMBOL(security_cred_getlsmprop); /** * security_kernel_act_as() - Set the kernel credentials to act as secid * @new: credentials * @secid: secid * * Set the credentials for a kernel service to act as (subjective context). * The current task must be the one that nominated @secid. * * Return: Returns 0 if successful. */ int security_kernel_act_as(struct cred *new, u32 secid) { return call_int_hook(kernel_act_as, new, secid); } /** * security_kernel_create_files_as() - Set file creation context using an inode * @new: target credentials * @inode: reference inode * * Set the file creation context in a set of credentials to be the same as the * objective context of the specified inode. The current task must be the one * that nominated @inode. * * Return: Returns 0 if successful. */ int security_kernel_create_files_as(struct cred *new, struct inode *inode) { return call_int_hook(kernel_create_files_as, new, inode); } /** * security_kernel_module_request() - Check if loading a module is allowed * @kmod_name: module name * * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. * * Return: Returns 0 if successful. 
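 *
 * For orientation: this hook sits on the kernel-internal module auto-load
 * path, i.e. it is consulted whenever some subsystem does something like
 *
 *	request_module("net-pf-%d", family);
 *
 * so an LSM can veto the modprobe usermode upcall before it is spawned.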
*/ int security_kernel_module_request(char *kmod_name) { return call_int_hook(kernel_module_request, kmod_name); } /** * security_kernel_read_file() - Read a file specified by userspace * @file: file * @id: file identifier * @contents: trust if security_kernel_post_read_file() will be called * * Read a file specified by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_read_file(struct file *file, enum kernel_read_file_id id, bool contents) { return call_int_hook(kernel_read_file, file, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_read_file); /** * security_kernel_post_read_file() - Read a file specified by userspace * @file: file * @buf: file contents * @size: size of file contents * @id: file identifier * * Read a file specified by userspace. This must be paired with a prior call * to security_kernel_read_file() call that indicated this hook would also be * called, see security_kernel_read_file() for more information. * * Return: Returns 0 if permission is granted. */ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) { return call_int_hook(kernel_post_read_file, file, buf, size, id); } EXPORT_SYMBOL_GPL(security_kernel_post_read_file); /** * security_kernel_load_data() - Load data provided by userspace * @id: data identifier * @contents: true if security_kernel_post_load_data() will be called * * Load data provided by userspace. * * Return: Returns 0 if permission is granted. */ int security_kernel_load_data(enum kernel_load_data_id id, bool contents) { return call_int_hook(kernel_load_data, id, contents); } EXPORT_SYMBOL_GPL(security_kernel_load_data); /** * security_kernel_post_load_data() - Load userspace data from a non-file source * @buf: data * @size: size of data * @id: data identifier * @description: text description of data, specific to the id value * * Load data provided by a non-file source (usually userspace buffer). This * must be paired with a prior security_kernel_load_data() call that indicated * this hook would also be called, see security_kernel_load_data() for more * information. * * Return: Returns 0 if permission is granted. */ int security_kernel_post_load_data(char *buf, loff_t size, enum kernel_load_data_id id, char *description) { return call_int_hook(kernel_post_load_data, buf, size, id, description); } EXPORT_SYMBOL_GPL(security_kernel_post_load_data); /** * security_task_fix_setuid() - Update LSM with new user id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag values * * Update the module's state after setting one or more of the user identity * attributes of the current process. The @flags parameter indicates which of * the set*uid system calls invoked this hook. If @new is the set of * credentials that will be installed. Modifications should be made to this * rather than to @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setuid, new, old, flags); } /** * security_task_fix_setgid() - Update LSM with new group id attributes * @new: updated credentials * @old: credentials being replaced * @flags: LSM_SETID_* flag value * * Update the module's state after setting one or more of the group identity * attributes of the current process. The @flags parameter indicates which of * the set*gid system calls invoked this hook. @new is the set of credentials * that will be installed. 
Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setgid(struct cred *new, const struct cred *old, int flags) { return call_int_hook(task_fix_setgid, new, old, flags); } /** * security_task_fix_setgroups() - Update LSM with new supplementary groups * @new: updated credentials * @old: credentials being replaced * * Update the module's state after setting the supplementary group identity * attributes of the current process. @new is the set of credentials that will * be installed. Modifications should be made to this rather than to * @current->cred. * * Return: Returns 0 on success. */ int security_task_fix_setgroups(struct cred *new, const struct cred *old) { return call_int_hook(task_fix_setgroups, new, old); } /** * security_task_setpgid() - Check if setting the pgid is allowed * @p: task being modified * @pgid: new pgid * * Check permission before setting the process group identifier of the process * @p to @pgid. * * Return: Returns 0 if permission is granted. */ int security_task_setpgid(struct task_struct *p, pid_t pgid) { return call_int_hook(task_setpgid, p, pgid); } /** * security_task_getpgid() - Check if getting the pgid is allowed * @p: task * * Check permission before getting the process group identifier of the process * @p. * * Return: Returns 0 if permission is granted. */ int security_task_getpgid(struct task_struct *p) { return call_int_hook(task_getpgid, p); } /** * security_task_getsid() - Check if getting the session id is allowed * @p: task * * Check permission before getting the session identifier of the process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getsid(struct task_struct *p) { return call_int_hook(task_getsid, p); } /** * security_current_getlsmprop_subj() - Current task's subjective LSM data * @prop: lsm specific information * * Retrieve the subjective security identifier of the current task and return * it in @prop. */ void security_current_getlsmprop_subj(struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(current_getlsmprop_subj, prop); } EXPORT_SYMBOL(security_current_getlsmprop_subj); /** * security_task_getlsmprop_obj() - Get a task's objective LSM data * @p: target task * @prop: lsm specific information * * Retrieve the objective security identifier of the task_struct in @p and * return it in @prop. */ void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(task_getlsmprop_obj, p, prop); } EXPORT_SYMBOL(security_task_getlsmprop_obj); /** * security_task_setnice() - Check if setting a task's nice value is allowed * @p: target task * @nice: nice value * * Check permission before setting the nice value of @p to @nice. * * Return: Returns 0 if permission is granted. */ int security_task_setnice(struct task_struct *p, int nice) { return call_int_hook(task_setnice, p, nice); } /** * security_task_setioprio() - Check if setting a task's ioprio is allowed * @p: target task * @ioprio: ioprio value * * Check permission before setting the ioprio value of @p to @ioprio. * * Return: Returns 0 if permission is granted. */ int security_task_setioprio(struct task_struct *p, int ioprio) { return call_int_hook(task_setioprio, p, ioprio); } /** * security_task_getioprio() - Check if getting a task's ioprio is allowed * @p: task * * Check permission before getting the ioprio value of @p. * * Return: Returns 0 if permission is granted. 
*/ int security_task_getioprio(struct task_struct *p) { return call_int_hook(task_getioprio, p); } /** * security_task_prlimit() - Check if get/setting resources limits is allowed * @cred: current task credentials * @tcred: target task credentials * @flags: LSM_PRLIMIT_* flag bits indicating a get/set/both * * Check permission before getting and/or setting the resource limits of * another task. * * Return: Returns 0 if permission is granted. */ int security_task_prlimit(const struct cred *cred, const struct cred *tcred, unsigned int flags) { return call_int_hook(task_prlimit, cred, tcred, flags); } /** * security_task_setrlimit() - Check if setting a new rlimit value is allowed * @p: target task's group leader * @resource: resource whose limit is being set * @new_rlim: new resource limit * * Check permission before setting the resource limits of process @p for * @resource to @new_rlim. The old resource limit values can be examined by * dereferencing (p->signal->rlim + resource). * * Return: Returns 0 if permission is granted. */ int security_task_setrlimit(struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) { return call_int_hook(task_setrlimit, p, resource, new_rlim); } /** * security_task_setscheduler() - Check if setting sched policy/param is allowed * @p: target task * * Check permission before setting scheduling policy and/or parameters of * process @p. * * Return: Returns 0 if permission is granted. */ int security_task_setscheduler(struct task_struct *p) { return call_int_hook(task_setscheduler, p); } /** * security_task_getscheduler() - Check if getting scheduling info is allowed * @p: target task * * Check permission before obtaining scheduling information for process @p. * * Return: Returns 0 if permission is granted. */ int security_task_getscheduler(struct task_struct *p) { return call_int_hook(task_getscheduler, p); } /** * security_task_movememory() - Check if moving memory is allowed * @p: task * * Check permission before moving memory owned by process @p. * * Return: Returns 0 if permission is granted. */ int security_task_movememory(struct task_struct *p) { return call_int_hook(task_movememory, p); } /** * security_task_kill() - Check if sending a signal is allowed * @p: target process * @info: signal information * @sig: signal value * @cred: credentials of the signal sender, NULL if @current * * Check permission before sending signal @sig to @p. @info can be NULL, the * constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming from * the kernel and should typically be permitted. SIGIO signals are handled * separately by the send_sigiotask hook in file_security_ops. * * Return: Returns 0 if permission is granted. */ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { return call_int_hook(task_kill, p, info, sig, cred); } /** * security_task_prctl() - Check if a prctl op is allowed * @option: operation * @arg2: argument * @arg3: argument * @arg4: argument * @arg5: argument * * Check permission before performing a process control operation on the * current process. * * Return: Return -ENOSYS if no-one wanted to handle this op, any other value * to cause prctl() to return immediately with that value. 
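 *
 * The unusual calling convention above can be illustrated with a sketch of
 * a hook that claims a single (hypothetical) option and leaves everything
 * else to other LSMs and the core prctl code:
 *
 *	static int example_task_prctl(int option, unsigned long arg2,
 *				      unsigned long arg3, unsigned long arg4,
 *				      unsigned long arg5)
 *	{
 *		if (option != PR_EXAMPLE_OPTION)	// placeholder option
 *			return -ENOSYS;		// "not mine", keep iterating
 *		// claiming the option: 0 allows and lets later LSMs also look,
 *		// any other value ends the walk and is returned to prctl()
 *		return example_handle_option(arg2);
 *	}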
*/ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int thisrc; int rc = LSM_RET_DEFAULT(task_prctl); struct lsm_static_call *scall; lsm_for_each_hook(scall, task_prctl) { thisrc = scall->hl->hook.task_prctl(option, arg2, arg3, arg4, arg5); if (thisrc != LSM_RET_DEFAULT(task_prctl)) { rc = thisrc; if (thisrc != 0) break; } } return rc; } /** * security_task_to_inode() - Set the security attributes of a task's inode * @p: task * @inode: inode * * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. */ void security_task_to_inode(struct task_struct *p, struct inode *inode) { call_void_hook(task_to_inode, p, inode); } /** * security_create_user_ns() - Check if creating a new userns is allowed * @cred: prepared creds * * Check permission prior to creating a new user namespace. * * Return: Returns 0 if successful, otherwise < 0 error code. */ int security_create_user_ns(const struct cred *cred) { return call_int_hook(userns_create, cred); } /** * security_ipc_permission() - Check if sysv ipc access is allowed * @ipcp: ipc permission structure * @flag: requested permissions * * Check permissions for access to IPC. * * Return: Returns 0 if permission is granted. */ int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag) { return call_int_hook(ipc_permission, ipcp, flag); } /** * security_ipc_getlsmprop() - Get the sysv ipc object LSM data * @ipcp: ipc permission structure * @prop: pointer to lsm information * * Get the lsm information associated with the ipc object. */ void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop) { lsmprop_init(prop); call_void_hook(ipc_getlsmprop, ipcp, prop); } /** * security_msg_msg_alloc() - Allocate a sysv ipc message LSM blob * @msg: message structure * * Allocate and attach a security structure to the msg->security field. The * security field is initialized to NULL when the structure is first created. * * Return: Return 0 if operation was successful and permission is granted. */ int security_msg_msg_alloc(struct msg_msg *msg) { int rc = lsm_msg_msg_alloc(msg); if (unlikely(rc)) return rc; rc = call_int_hook(msg_msg_alloc_security, msg); if (unlikely(rc)) security_msg_msg_free(msg); return rc; } /** * security_msg_msg_free() - Free a sysv ipc message LSM blob * @msg: message structure * * Deallocate the security structure for this message. */ void security_msg_msg_free(struct msg_msg *msg) { call_void_hook(msg_msg_free_security, msg); kfree(msg->security); msg->security = NULL; } /** * security_msg_queue_alloc() - Allocate a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Allocate and attach a security structure to @msg. The security field is * initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_msg_queue_alloc(struct kern_ipc_perm *msq) { int rc = lsm_ipc_alloc(msq); if (unlikely(rc)) return rc; rc = call_int_hook(msg_queue_alloc_security, msq); if (unlikely(rc)) security_msg_queue_free(msq); return rc; } /** * security_msg_queue_free() - Free a sysv ipc msg queue LSM blob * @msq: sysv ipc permission structure * * Deallocate security field @perm->security for the message queue. 
*/ void security_msg_queue_free(struct kern_ipc_perm *msq) { call_void_hook(msg_queue_free_security, msq); kfree(msq->security); msq->security = NULL; } /** * security_msg_queue_associate() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @msqflg: operation flags * * Check permission when a message queue is requested through the msgget system * call. This hook is only called when returning the message queue identifier * for an existing message queue, not when a new message queue is created. * * Return: Return 0 if permission is granted. */ int security_msg_queue_associate(struct kern_ipc_perm *msq, int msqflg) { return call_int_hook(msg_queue_associate, msq, msqflg); } /** * security_msg_queue_msgctl() - Check if a msg queue operation is allowed * @msq: sysv ipc permission structure * @cmd: operation * * Check permission when a message control operation specified by @cmd is to be * performed on the message queue with permissions. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgctl(struct kern_ipc_perm *msq, int cmd) { return call_int_hook(msg_queue_msgctl, msq, cmd); } /** * security_msg_queue_msgsnd() - Check if sending a sysv ipc message is allowed * @msq: sysv ipc permission structure * @msg: message * @msqflg: operation flags * * Check permission before a message, @msg, is enqueued on the message queue * with permissions specified in @msq. * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgsnd(struct kern_ipc_perm *msq, struct msg_msg *msg, int msqflg) { return call_int_hook(msg_queue_msgsnd, msq, msg, msqflg); } /** * security_msg_queue_msgrcv() - Check if receiving a sysv ipc msg is allowed * @msq: sysv ipc permission structure * @msg: message * @target: target task * @type: type of message requested * @mode: operation flags * * Check permission before a message, @msg, is removed from the message queue. * The @target task structure contains a pointer to the process that will be * receiving the message (not equal to the current process when inline receives * are being performed). * * Return: Returns 0 if permission is granted. */ int security_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return call_int_hook(msg_queue_msgrcv, msq, msg, target, type, mode); } /** * security_shm_alloc() - Allocate a sysv shm LSM blob * @shp: sysv ipc permission structure * * Allocate and attach a security structure to the @shp security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_shm_alloc(struct kern_ipc_perm *shp) { int rc = lsm_ipc_alloc(shp); if (unlikely(rc)) return rc; rc = call_int_hook(shm_alloc_security, shp); if (unlikely(rc)) security_shm_free(shp); return rc; } /** * security_shm_free() - Free a sysv shm LSM blob * @shp: sysv ipc permission structure * * Deallocate the security structure @perm->security for the memory segment. */ void security_shm_free(struct kern_ipc_perm *shp) { call_void_hook(shm_free_security, shp); kfree(shp->security); shp->security = NULL; } /** * security_shm_associate() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @shmflg: operation flags * * Check permission when a shared memory region is requested through the shmget * system call. 
This hook is only called when returning the shared memory * region identifier for an existing region, not when a new shared memory * region is created. * * Return: Returns 0 if permission is granted. */ int security_shm_associate(struct kern_ipc_perm *shp, int shmflg) { return call_int_hook(shm_associate, shp, shmflg); } /** * security_shm_shmctl() - Check if a sysv shm operation is allowed * @shp: sysv ipc permission structure * @cmd: operation * * Check permission when a shared memory control operation specified by @cmd is * to be performed on the shared memory region with permissions in @shp. * * Return: Return 0 if permission is granted. */ int security_shm_shmctl(struct kern_ipc_perm *shp, int cmd) { return call_int_hook(shm_shmctl, shp, cmd); } /** * security_shm_shmat() - Check if a sysv shm attach operation is allowed * @shp: sysv ipc permission structure * @shmaddr: address of memory region to attach * @shmflg: operation flags * * Check permissions prior to allowing the shmat system call to attach the * shared memory segment with permissions @shp to the data segment of the * calling process. The attaching address is specified by @shmaddr. * * Return: Returns 0 if permission is granted. */ int security_shm_shmat(struct kern_ipc_perm *shp, char __user *shmaddr, int shmflg) { return call_int_hook(shm_shmat, shp, shmaddr, shmflg); } /** * security_sem_alloc() - Allocate a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Allocate and attach a security structure to the @sma security field. The * security field is initialized to NULL when the structure is first created. * * Return: Returns 0 if operation was successful and permission is granted. */ int security_sem_alloc(struct kern_ipc_perm *sma) { int rc = lsm_ipc_alloc(sma); if (unlikely(rc)) return rc; rc = call_int_hook(sem_alloc_security, sma); if (unlikely(rc)) security_sem_free(sma); return rc; } /** * security_sem_free() - Free a sysv semaphore LSM blob * @sma: sysv ipc permission structure * * Deallocate security structure @sma->security for the semaphore. */ void security_sem_free(struct kern_ipc_perm *sma) { call_void_hook(sem_free_security, sma); kfree(sma->security); sma->security = NULL; } /** * security_sem_associate() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @semflg: operation flags * * Check permission when a semaphore is requested through the semget system * call. This hook is only called when returning the semaphore identifier for * an existing semaphore, not when a new one must be created. * * Return: Returns 0 if permission is granted. */ int security_sem_associate(struct kern_ipc_perm *sma, int semflg) { return call_int_hook(sem_associate, sma, semflg); } /** * security_sem_semctl() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @cmd: operation * * Check permission when a semaphore operation specified by @cmd is to be * performed on the semaphore. * * Return: Returns 0 if permission is granted. */ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd) { return call_int_hook(sem_semctl, sma, cmd); } /** * security_sem_semop() - Check if a sysv semaphore operation is allowed * @sma: sysv ipc permission structure * @sops: operations to perform * @nsops: number of operations * @alter: flag indicating changes will be made * * Check permissions before performing operations on members of the semaphore * set. If the @alter flag is nonzero, the semaphore set may be modified. 
* * Return: Returns 0 if permission is granted. */ int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, unsigned nsops, int alter) { return call_int_hook(sem_semop, sma, sops, nsops, alter); } /** * security_d_instantiate() - Populate an inode's LSM state based on a dentry * @dentry: dentry * @inode: inode * * Fill in @inode security information for a @dentry if allowed. */ void security_d_instantiate(struct dentry *dentry, struct inode *inode) { if (unlikely(inode && IS_PRIVATE(inode))) return; call_void_hook(d_instantiate, dentry, inode); } EXPORT_SYMBOL(security_d_instantiate); /* * Please keep this in sync with it's counterpart in security/lsm_syscalls.c */ /** * security_getselfattr - Read an LSM attribute of the current process. * @attr: which attribute to return * @uctx: the user-space destination for the information, or NULL * @size: pointer to the size of space available to receive the data * @flags: special handling options. LSM_FLAG_SINGLE indicates that only * attributes associated with the LSM identified in the passed @ctx be * reported. * * A NULL value for @uctx can be used to get both the number of attributes * and the size of the data. * * Returns the number of attributes found on success, negative value * on error. @size is reset to the total size of the data. * If @size is insufficient to contain the data -E2BIG is returned. */ int security_getselfattr(unsigned int attr, struct lsm_ctx __user *uctx, u32 __user *size, u32 flags) { struct lsm_static_call *scall; struct lsm_ctx lctx = { .id = LSM_ID_UNDEF, }; u8 __user *base = (u8 __user *)uctx; u32 entrysize; u32 total = 0; u32 left; bool toobig = false; bool single = false; int count = 0; int rc; if (attr == LSM_ATTR_UNDEF) return -EINVAL; if (size == NULL) return -EINVAL; if (get_user(left, size)) return -EFAULT; if (flags) { /* * Only flag supported is LSM_FLAG_SINGLE */ if (flags != LSM_FLAG_SINGLE || !uctx) return -EINVAL; if (copy_from_user(&lctx, uctx, sizeof(lctx))) return -EFAULT; /* * If the LSM ID isn't specified it is an error. */ if (lctx.id == LSM_ID_UNDEF) return -EINVAL; single = true; } /* * In the usual case gather all the data from the LSMs. * In the single case only get the data from the LSM specified. */ lsm_for_each_hook(scall, getselfattr) { if (single && lctx.id != scall->hl->lsmid->id) continue; entrysize = left; if (base) uctx = (struct lsm_ctx __user *)(base + total); rc = scall->hl->hook.getselfattr(attr, uctx, &entrysize, flags); if (rc == -EOPNOTSUPP) continue; if (rc == -E2BIG) { rc = 0; left = 0; toobig = true; } else if (rc < 0) return rc; else left -= entrysize; total += entrysize; count += rc; if (single) break; } if (put_user(total, size)) return -EFAULT; if (toobig) return -E2BIG; if (count == 0) return LSM_RET_DEFAULT(getselfattr); return count; } /* * Please keep this in sync with it's counterpart in security/lsm_syscalls.c */ /** * security_setselfattr - Set an LSM attribute on the current process. * @attr: which attribute to set * @uctx: the user-space source for the information * @size: the size of the data * @flags: reserved for future use, must be 0 * * Set an LSM attribute for the current process. The LSM, attribute * and new value are included in @uctx. * * Returns 0 on success, -EINVAL if the input is inconsistent, -EFAULT * if the user buffer is inaccessible, E2BIG if size is too big, or an * LSM specific failure. 
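 *
 * From userspace this path is reached through the lsm_set_self_attr()
 * system call. A minimal sketch of a caller follows (illustrative only:
 * error handling is trimmed, LSM_ID_SELINUX is just an example target, and
 * it assumes uapi headers new enough to provide <linux/lsm.h> and
 * __NR_lsm_set_self_attr):
 *
 *	#include <linux/lsm.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int set_current_label(const char *label)
 *	{
 *		size_t ctx_len = strlen(label) + 1;
 *		struct lsm_ctx *ctx = calloc(1, sizeof(*ctx) + ctx_len);
 *		int rc;
 *
 *		if (!ctx)
 *			return -1;
 *		ctx->id = LSM_ID_SELINUX;	// which LSM the value is for
 *		ctx->ctx_len = ctx_len;
 *		ctx->len = sizeof(*ctx) + ctx_len;
 *		memcpy(ctx->ctx, label, ctx_len);
 *		rc = syscall(__NR_lsm_set_self_attr, LSM_ATTR_CURRENT,
 *			     ctx, ctx->len, 0);
 *		free(ctx);
 *		return rc;
 *	}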
*/ int security_setselfattr(unsigned int attr, struct lsm_ctx __user *uctx, u32 size, u32 flags) { struct lsm_static_call *scall; struct lsm_ctx *lctx; int rc = LSM_RET_DEFAULT(setselfattr); u64 required_len; if (flags) return -EINVAL; if (size < sizeof(*lctx)) return -EINVAL; if (size > PAGE_SIZE) return -E2BIG; lctx = memdup_user(uctx, size); if (IS_ERR(lctx)) return PTR_ERR(lctx); if (size < lctx->len || check_add_overflow(sizeof(*lctx), lctx->ctx_len, &required_len) || lctx->len < required_len) { rc = -EINVAL; goto free_out; } lsm_for_each_hook(scall, setselfattr) if ((scall->hl->lsmid->id) == lctx->id) { rc = scall->hl->hook.setselfattr(attr, lctx, size, flags); break; } free_out: kfree(lctx); return rc; } /** * security_getprocattr() - Read an attribute for a task * @p: the task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * * Read attribute @name for task @p and store it into @value if allowed. * * Return: Returns the length of @value on success, a negative value otherwise. */ int security_getprocattr(struct task_struct *p, int lsmid, const char *name, char **value) { struct lsm_static_call *scall; lsm_for_each_hook(scall, getprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.getprocattr(p, name, value); } return LSM_RET_DEFAULT(getprocattr); } /** * security_setprocattr() - Set an attribute for a task * @lsmid: LSM identification * @name: attribute name * @value: attribute value * @size: attribute value size * * Write (set) the current task's attribute @name to @value, size @size if * allowed. * * Return: Returns bytes written on success, a negative value otherwise. */ int security_setprocattr(int lsmid, const char *name, void *value, size_t size) { struct lsm_static_call *scall; lsm_for_each_hook(scall, setprocattr) { if (lsmid != 0 && lsmid != scall->hl->lsmid->id) continue; return scall->hl->hook.setprocattr(name, value, size); } return LSM_RET_DEFAULT(setprocattr); } /** * security_ismaclabel() - Check if the named attribute is a MAC label * @name: full extended attribute name * * Check if the extended attribute specified by @name represents a MAC label. * * Return: Returns 1 if name is a MAC attribute otherwise returns 0. */ int security_ismaclabel(const char *name) { return call_int_hook(ismaclabel, name); } EXPORT_SYMBOL(security_ismaclabel); /** * security_secid_to_secctx() - Convert a secid to a secctx * @secid: secid * @cp: the LSM context * * Convert secid to security context. If @cp is NULL the length of the * result will be returned, but no data will be returned. This * does mean that the length could change between calls to check the length and * the next call which actually allocates and returns the data. * * Return: Return length of data on success, error on failure. */ int security_secid_to_secctx(u32 secid, struct lsm_context *cp) { return call_int_hook(secid_to_secctx, secid, cp); } EXPORT_SYMBOL(security_secid_to_secctx); /** * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx * @prop: lsm specific information * @cp: the LSM context * * Convert a @prop entry to security context. If @cp is NULL the * length of the result will be returned. This does mean that the * length could change between calls to check the length and the * next call which actually allocates and returns the @cp. * * Return: Return length of data on success, error on failure. 
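 *
 * Typical in-kernel usage pairs the conversion with security_release_secctx()
 * once the text is no longer needed. Sketch only; the lsm_context field
 * names below (.context/.len) are assumed from current trees and the
 * pr_info() is purely illustrative:
 *
 *	struct lsm_prop prop;
 *	struct lsm_context ctx;
 *	int len;
 *
 *	security_current_getlsmprop_subj(&prop);
 *	len = security_lsmprop_to_secctx(&prop, &ctx);
 *	if (len < 0)
 *		return len;
 *	pr_info("subject label: %.*s\n", (int)ctx.len, ctx.context);
 *	security_release_secctx(&ctx);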
*/ int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp) { return call_int_hook(lsmprop_to_secctx, prop, cp); } EXPORT_SYMBOL(security_lsmprop_to_secctx); /** * security_secctx_to_secid() - Convert a secctx to a secid * @secdata: secctx * @seclen: length of secctx * @secid: secid * * Convert security context to secid. * * Return: Returns 0 on success, error on failure. */ int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { *secid = 0; return call_int_hook(secctx_to_secid, secdata, seclen, secid); } EXPORT_SYMBOL(security_secctx_to_secid); /** * security_release_secctx() - Free a secctx buffer * @cp: the security context * * Release the security context. */ void security_release_secctx(struct lsm_context *cp) { call_void_hook(release_secctx, cp); memset(cp, 0, sizeof(*cp)); } EXPORT_SYMBOL(security_release_secctx); /** * security_inode_invalidate_secctx() - Invalidate an inode's security label * @inode: inode * * Notify the security module that it must revalidate the security context of * an inode. */ void security_inode_invalidate_secctx(struct inode *inode) { call_void_hook(inode_invalidate_secctx, inode); } EXPORT_SYMBOL(security_inode_invalidate_secctx); /** * security_inode_notifysecctx() - Notify the LSM of an inode's security label * @inode: inode * @ctx: secctx * @ctxlen: length of secctx * * Notify the security module of what the security context of an inode should * be. Initializes the incore security context managed by the security module * for this inode. Example usage: NFS client invokes this hook to initialize * the security context in its incore inode to the value provided by the server * for the file when the server returned the file's attributes to the client. * Must be called with inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. */ int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { return call_int_hook(inode_notifysecctx, inode, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_notifysecctx); /** * security_inode_setsecctx() - Change the security label of an inode * @dentry: inode * @ctx: secctx * @ctxlen: length of secctx * * Change the security context of an inode. Updates the incore security * context managed by the security module and invokes the fs code as needed * (via __vfs_setxattr_noperm) to update any backing xattrs that represent the * context. Example usage: NFS server invokes this hook to change the security * context in its incore inode and on the backing filesystem to a value * provided by the client on a SETATTR operation. Must be called with * inode->i_mutex locked. * * Return: Returns 0 on success, error on failure. */ int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return call_int_hook(inode_setsecctx, dentry, ctx, ctxlen); } EXPORT_SYMBOL(security_inode_setsecctx); /** * security_inode_getsecctx() - Get the security label of an inode * @inode: inode * @cp: security context * * On success, returns 0 and fills out @cp with the security context * for the given @inode. * * Return: Returns 0 on success, error on failure. 
*/ int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { memset(cp, 0, sizeof(*cp)); return call_int_hook(inode_getsecctx, inode, cp); } EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_WATCH_QUEUE /** * security_post_notification() - Check if a watch notification can be posted * @w_cred: credentials of the task that set the watch * @cred: credentials of the task which triggered the watch * @n: the notification * * Check to see if a watch notification can be posted to a particular queue. * * Return: Returns 0 if permission is granted. */ int security_post_notification(const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) { return call_int_hook(post_notification, w_cred, cred, n); } #endif /* CONFIG_WATCH_QUEUE */ #ifdef CONFIG_KEY_NOTIFICATIONS /** * security_watch_key() - Check if a task is allowed to watch for key events * @key: the key to watch * * Check to see if a process is allowed to watch for event notifications from * a key or keyring. * * Return: Returns 0 if permission is granted. */ int security_watch_key(struct key *key) { return call_int_hook(watch_key, key); } #endif /* CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK /** * security_netlink_send() - Save info and check if netlink sending is allowed * @sk: sending socket * @skb: netlink message * * Save security information for a netlink message so that permission checking * can be performed when the message is processed. The security information * can be saved using the eff_cap field of the netlink_skb_parms structure. * Also may be used to provide fine grained control over message transmission. * * Return: Returns 0 if the information was successfully saved and message is * allowed to be transmitted. */ int security_netlink_send(struct sock *sk, struct sk_buff *skb) { return call_int_hook(netlink_send, sk, skb); } /** * security_unix_stream_connect() - Check if a AF_UNIX stream is allowed * @sock: originating sock * @other: peer sock * @newsk: new sock * * Check permissions before establishing a Unix domain stream connection * between @sock and @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return call_int_hook(unix_stream_connect, sock, other, newsk); } EXPORT_SYMBOL(security_unix_stream_connect); /** * security_unix_may_send() - Check if AF_UNIX socket can send datagrams * @sock: originating sock * @other: peer sock * * Check permissions before connecting or sending datagrams from @sock to * @other. * * The @unix_stream_connect and @unix_may_send hooks were necessary because * Linux provides an alternative to the conventional file name space for Unix * domain sockets. 
Whereas binding and connecting to sockets in the file name * space is mediated by the typical file permissions (and caught by the mknod * and permission hooks in inode_security_ops), binding and connecting to * sockets in the abstract name space is completely unmediated. Sufficient * control of Unix domain sockets in the abstract name space isn't possible * using only the socket layer hooks, since we need to know the actual target * socket, which is not looked up until we are inside the af_unix code. * * Return: Returns 0 if permission is granted. */ int security_unix_may_send(struct socket *sock, struct socket *other) { return call_int_hook(unix_may_send, sock, other); } EXPORT_SYMBOL(security_unix_may_send); /** * security_socket_create() - Check if creating a new socket is allowed * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * Check permissions prior to creating a new socket. * * Return: Returns 0 if permission is granted. */ int security_socket_create(int family, int type, int protocol, int kern) { return call_int_hook(socket_create, family, type, protocol, kern); } /** * security_socket_post_create() - Initialize a newly created socket * @sock: socket * @family: protocol family * @type: communications type * @protocol: requested protocol * @kern: set to 1 if a kernel socket is requested * * This hook allows a module to update or allocate a per-socket security * structure. Note that the security field was not added directly to the socket * structure, but rather, the socket security information is stored in the * associated inode. Typically, the inode alloc_security hook will allocate * and attach security information to SOCK_INODE(sock)->i_security. This hook * may be used to update the SOCK_INODE(sock)->i_security field with additional * information that wasn't available when the inode was allocated. * * Return: Returns 0 if permission is granted. */ int security_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { return call_int_hook(socket_post_create, sock, family, type, protocol, kern); } /** * security_socket_socketpair() - Check if creating a socketpair is allowed * @socka: first socket * @sockb: second socket * * Check permissions before creating a fresh pair of sockets. * * Return: Returns 0 if permission is granted and the connection was * established. */ int security_socket_socketpair(struct socket *socka, struct socket *sockb) { return call_int_hook(socket_socketpair, socka, sockb); } EXPORT_SYMBOL(security_socket_socketpair); /** * security_socket_bind() - Check if a socket bind operation is allowed * @sock: socket * @address: requested bind address * @addrlen: length of address * * Check permission before socket protocol layer bind operation is performed * and the socket @sock is bound to the address specified in the @address * parameter. * * Return: Returns 0 if permission is granted. */ int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { return call_int_hook(socket_bind, sock, address, addrlen); } /** * security_socket_connect() - Check if a socket connect operation is allowed * @sock: socket * @address: address of remote connection point * @addrlen: length of address * * Check permission before socket protocol layer connect operation attempts to * connect socket @sock to a remote address, @address. * * Return: Returns 0 if permission is granted. 
*/ int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen) { return call_int_hook(socket_connect, sock, address, addrlen); } /** * security_socket_listen() - Check if a socket is allowed to listen * @sock: socket * @backlog: connection queue size * * Check permission before socket protocol layer listen operation. * * Return: Returns 0 if permission is granted. */ int security_socket_listen(struct socket *sock, int backlog) { return call_int_hook(socket_listen, sock, backlog); } /** * security_socket_accept() - Check if a socket is allowed to accept connections * @sock: listening socket * @newsock: newly creation connection socket * * Check permission before accepting a new connection. Note that the new * socket, @newsock, has been created and some information copied to it, but * the accept operation has not actually been performed. * * Return: Returns 0 if permission is granted. */ int security_socket_accept(struct socket *sock, struct socket *newsock) { return call_int_hook(socket_accept, sock, newsock); } /** * security_socket_sendmsg() - Check if sending a message is allowed * @sock: sending socket * @msg: message to send * @size: size of message * * Check permission before transmitting a message to another socket. * * Return: Returns 0 if permission is granted. */ int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return call_int_hook(socket_sendmsg, sock, msg, size); } /** * security_socket_recvmsg() - Check if receiving a message is allowed * @sock: receiving socket * @msg: message to receive * @size: size of message * @flags: operational flags * * Check permission before receiving a message from a socket. * * Return: Returns 0 if permission is granted. */ int security_socket_recvmsg(struct socket *sock, struct msghdr *msg, int size, int flags) { return call_int_hook(socket_recvmsg, sock, msg, size, flags); } /** * security_socket_getsockname() - Check if reading the socket addr is allowed * @sock: socket * * Check permission before reading the local address (name) of the socket * object. * * Return: Returns 0 if permission is granted. */ int security_socket_getsockname(struct socket *sock) { return call_int_hook(socket_getsockname, sock); } /** * security_socket_getpeername() - Check if reading the peer's addr is allowed * @sock: socket * * Check permission before the remote address (name) of a socket object. * * Return: Returns 0 if permission is granted. */ int security_socket_getpeername(struct socket *sock) { return call_int_hook(socket_getpeername, sock); } /** * security_socket_getsockopt() - Check if reading a socket option is allowed * @sock: socket * @level: option's protocol level * @optname: option name * * Check permissions before retrieving the options associated with socket * @sock. * * Return: Returns 0 if permission is granted. */ int security_socket_getsockopt(struct socket *sock, int level, int optname) { return call_int_hook(socket_getsockopt, sock, level, optname); } /** * security_socket_setsockopt() - Check if setting a socket option is allowed * @sock: socket * @level: option's protocol level * @optname: option name * * Check permissions before setting the options associated with socket @sock. * * Return: Returns 0 if permission is granted. 
*/ int security_socket_setsockopt(struct socket *sock, int level, int optname) { return call_int_hook(socket_setsockopt, sock, level, optname); } /** * security_socket_shutdown() - Checks if shutting down the socket is allowed * @sock: socket * @how: flag indicating how sends and receives are handled * * Checks permission before all or part of a connection on the socket @sock is * shut down. * * Return: Returns 0 if permission is granted. */ int security_socket_shutdown(struct socket *sock, int how) { return call_int_hook(socket_shutdown, sock, how); } /** * security_sock_rcv_skb() - Check if an incoming network packet is allowed * @sk: destination sock * @skb: incoming packet * * Check permissions on incoming network packets. This hook is distinct from * Netfilter's IP input hooks since it is the first time that the incoming * sk_buff @skb has been associated with a particular socket, @sk. Must not * sleep inside this hook because some callers hold spinlocks. * * Return: Returns 0 if permission is granted. */ int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { return call_int_hook(socket_sock_rcv_skb, sk, skb); } EXPORT_SYMBOL(security_sock_rcv_skb); /** * security_socket_getpeersec_stream() - Get the remote peer label * @sock: socket * @optval: destination buffer * @optlen: size of peer label copied into the buffer * @len: maximum size of the destination buffer * * This hook allows the security module to provide peer socket security state * for unix or connected tcp sockets to userspace via getsockopt SO_GETPEERSEC. * For tcp sockets this can be meaningful if the socket is associated with an * ipsec SA. * * Return: Returns 0 if all is well, otherwise, typical getsockopt return * values. */ int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) { return call_int_hook(socket_getpeersec_stream, sock, optval, optlen, len); } /** * security_socket_getpeersec_dgram() - Get the remote peer label * @sock: socket * @skb: datagram packet * @secid: remote peer label secid * * This hook allows the security module to provide peer socket security state * for udp sockets on a per-packet basis to userspace via getsockopt * SO_GETPEERSEC. The application must first have indicated the IP_PASSSEC * option via getsockopt. It can then retrieve the security state returned by * this hook for a packet via the SCM_SECURITY ancillary message type. * * Return: Returns 0 on success, error on failure. */ int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { return call_int_hook(socket_getpeersec_dgram, sock, skb, secid); } EXPORT_SYMBOL(security_socket_getpeersec_dgram); /** * lsm_sock_alloc - allocate a composite sock blob * @sock: the sock that needs a blob * @gfp: allocation mode * * Allocate the sock blob for all the modules * * Returns 0, or -ENOMEM if memory can't be allocated. */ static int lsm_sock_alloc(struct sock *sock, gfp_t gfp) { return lsm_blob_alloc(&sock->sk_security, blob_sizes.lbs_sock, gfp); } /** * security_sk_alloc() - Allocate and initialize a sock's LSM blob * @sk: sock * @family: protocol family * @priority: gfp flags * * Allocate and attach a security structure to the sk->sk_security field, which * is used to copy security attributes between local stream sockets. * * Return: Returns 0 on success, error on failure. 
*/ int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { int rc = lsm_sock_alloc(sk, priority); if (unlikely(rc)) return rc; rc = call_int_hook(sk_alloc_security, sk, family, priority); if (unlikely(rc)) security_sk_free(sk); return rc; } /** * security_sk_free() - Free the sock's LSM blob * @sk: sock * * Deallocate security structure. */ void security_sk_free(struct sock *sk) { call_void_hook(sk_free_security, sk); kfree(sk->sk_security); sk->sk_security = NULL; } /** * security_sk_clone() - Clone a sock's LSM state * @sk: original sock * @newsk: target sock * * Clone/copy security structure. */ void security_sk_clone(const struct sock *sk, struct sock *newsk) { call_void_hook(sk_clone_security, sk, newsk); } EXPORT_SYMBOL(security_sk_clone); /** * security_sk_classify_flow() - Set a flow's secid based on socket * @sk: original socket * @flic: target flow * * Set the target flow's secid to socket's secid. */ void security_sk_classify_flow(const struct sock *sk, struct flowi_common *flic) { call_void_hook(sk_getsecid, sk, &flic->flowic_secid); } EXPORT_SYMBOL(security_sk_classify_flow); /** * security_req_classify_flow() - Set a flow's secid based on request_sock * @req: request_sock * @flic: target flow * * Sets @flic's secid to @req's secid. */ void security_req_classify_flow(const struct request_sock *req, struct flowi_common *flic) { call_void_hook(req_classify_flow, req, flic); } EXPORT_SYMBOL(security_req_classify_flow); /** * security_sock_graft() - Reconcile LSM state when grafting a sock on a socket * @sk: sock being grafted * @parent: target parent socket * * Sets @parent's inode secid to @sk's secid and update @sk with any necessary * LSM state from @parent. */ void security_sock_graft(struct sock *sk, struct socket *parent) { call_void_hook(sock_graft, sk, parent); } EXPORT_SYMBOL(security_sock_graft); /** * security_inet_conn_request() - Set request_sock state using incoming connect * @sk: parent listening sock * @skb: incoming connection * @req: new request_sock * * Initialize the @req LSM state based on @sk and the incoming connect in @skb. * * Return: Returns 0 if permission is granted. */ int security_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { return call_int_hook(inet_conn_request, sk, skb, req); } EXPORT_SYMBOL(security_inet_conn_request); /** * security_inet_csk_clone() - Set new sock LSM state based on request_sock * @newsk: new sock * @req: connection request_sock * * Set that LSM state of @sock using the LSM state from @req. */ void security_inet_csk_clone(struct sock *newsk, const struct request_sock *req) { call_void_hook(inet_csk_clone, newsk, req); } /** * security_inet_conn_established() - Update sock's LSM state with connection * @sk: sock * @skb: connection packet * * Update @sock's LSM state to represent a new connection from @skb. */ void security_inet_conn_established(struct sock *sk, struct sk_buff *skb) { call_void_hook(inet_conn_established, sk, skb); } EXPORT_SYMBOL(security_inet_conn_established); /** * security_secmark_relabel_packet() - Check if setting a secmark is allowed * @secid: new secmark value * * Check if the process should be allowed to relabel packets to @secid. * * Return: Returns 0 if permission is granted. 
*/ int security_secmark_relabel_packet(u32 secid) { return call_int_hook(secmark_relabel_packet, secid); } EXPORT_SYMBOL(security_secmark_relabel_packet); /** * security_secmark_refcount_inc() - Increment the secmark labeling rule count * * Tells the LSM to increment the number of secmark labeling rules loaded. */ void security_secmark_refcount_inc(void) { call_void_hook(secmark_refcount_inc); } EXPORT_SYMBOL(security_secmark_refcount_inc); /** * security_secmark_refcount_dec() - Decrement the secmark labeling rule count * * Tells the LSM to decrement the number of secmark labeling rules loaded. */ void security_secmark_refcount_dec(void) { call_void_hook(secmark_refcount_dec); } EXPORT_SYMBOL(security_secmark_refcount_dec); /** * security_tun_dev_alloc_security() - Allocate a LSM blob for a TUN device * @security: pointer to the LSM blob * * This hook allows a module to allocate a security structure for a TUN device, * returning the pointer in @security. * * Return: Returns a zero on success, negative values on failure. */ int security_tun_dev_alloc_security(void **security) { int rc; rc = lsm_blob_alloc(security, blob_sizes.lbs_tun_dev, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(tun_dev_alloc_security, *security); if (rc) { kfree(*security); *security = NULL; } return rc; } EXPORT_SYMBOL(security_tun_dev_alloc_security); /** * security_tun_dev_free_security() - Free a TUN device LSM blob * @security: LSM blob * * This hook allows a module to free the security structure for a TUN device. */ void security_tun_dev_free_security(void *security) { kfree(security); } EXPORT_SYMBOL(security_tun_dev_free_security); /** * security_tun_dev_create() - Check if creating a TUN device is allowed * * Check permissions prior to creating a new TUN device. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_create(void) { return call_int_hook(tun_dev_create); } EXPORT_SYMBOL(security_tun_dev_create); /** * security_tun_dev_attach_queue() - Check if attaching a TUN queue is allowed * @security: TUN device LSM blob * * Check permissions prior to attaching to a TUN device queue. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach_queue(void *security) { return call_int_hook(tun_dev_attach_queue, security); } EXPORT_SYMBOL(security_tun_dev_attach_queue); /** * security_tun_dev_attach() - Update TUN device LSM state on attach * @sk: associated sock * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's sock structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_attach(struct sock *sk, void *security) { return call_int_hook(tun_dev_attach, sk, security); } EXPORT_SYMBOL(security_tun_dev_attach); /** * security_tun_dev_open() - Update TUN device LSM state on open * @security: TUN device LSM blob * * This hook can be used by the module to update any security state associated * with the TUN device's security structure. * * Return: Returns 0 if permission is granted. */ int security_tun_dev_open(void *security) { return call_int_hook(tun_dev_open, security); } EXPORT_SYMBOL(security_tun_dev_open); /** * security_sctp_assoc_request() - Update the LSM on a SCTP association req * @asoc: SCTP association * @skb: packet requesting the association * * Passes the @asoc and @chunk->skb of the association INIT packet to the LSM. * * Return: Returns 0 on success, error on failure. 
*/ int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb) { return call_int_hook(sctp_assoc_request, asoc, skb); } EXPORT_SYMBOL(security_sctp_assoc_request); /** * security_sctp_bind_connect() - Validate a list of addrs for a SCTP option * @sk: socket * @optname: SCTP option to validate * @address: list of IP addresses to validate * @addrlen: length of the address list * * Validiate permissions required for each address associated with sock @sk. * Depending on @optname, the addresses will be treated as either a connect or * bind service. The @addrlen is calculated on each IPv4 and IPv6 address using * sizeof(struct sockaddr_in) or sizeof(struct sockaddr_in6). * * Return: Returns 0 on success, error on failure. */ int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen) { return call_int_hook(sctp_bind_connect, sk, optname, address, addrlen); } EXPORT_SYMBOL(security_sctp_bind_connect); /** * security_sctp_sk_clone() - Clone a SCTP sock's LSM state * @asoc: SCTP association * @sk: original sock * @newsk: target sock * * Called whenever a new socket is created by accept(2) (i.e. a TCP style * socket) or when a socket is 'peeled off' e.g userspace calls * sctp_peeloff(3). */ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk) { call_void_hook(sctp_sk_clone, asoc, sk, newsk); } EXPORT_SYMBOL(security_sctp_sk_clone); /** * security_sctp_assoc_established() - Update LSM state when assoc established * @asoc: SCTP association * @skb: packet establishing the association * * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet to the * security module. * * Return: Returns 0 if permission is granted. */ int security_sctp_assoc_established(struct sctp_association *asoc, struct sk_buff *skb) { return call_int_hook(sctp_assoc_established, asoc, skb); } EXPORT_SYMBOL(security_sctp_assoc_established); /** * security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket * @sk: the owning MPTCP socket * @ssk: the new subflow * * Update the labeling for the given MPTCP subflow, to match the one of the * owning MPTCP socket. This hook has to be called after the socket creation and * initialization via the security_socket_create() and * security_socket_post_create() LSM hooks. * * Return: Returns 0 on success or a negative error code on failure. */ int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk) { return call_int_hook(mptcp_add_subflow, sk, ssk); } #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND /** * security_ib_pkey_access() - Check if access to an IB pkey is allowed * @sec: LSM blob * @subnet_prefix: subnet prefix of the port * @pkey: IB pkey * * Check permission to access a pkey when modifying a QP. * * Return: Returns 0 if permission is granted. */ int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) { return call_int_hook(ib_pkey_access, sec, subnet_prefix, pkey); } EXPORT_SYMBOL(security_ib_pkey_access); /** * security_ib_endport_manage_subnet() - Check if SMPs traffic is allowed * @sec: LSM blob * @dev_name: IB device name * @port_num: port number * * Check permissions to send and receive SMPs on a end port. * * Return: Returns 0 if permission is granted. 
*/ int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) { return call_int_hook(ib_endport_manage_subnet, sec, dev_name, port_num); } EXPORT_SYMBOL(security_ib_endport_manage_subnet); /** * security_ib_alloc_security() - Allocate an Infiniband LSM blob * @sec: LSM blob * * Allocate a security structure for Infiniband objects. * * Return: Returns 0 on success, non-zero on failure. */ int security_ib_alloc_security(void **sec) { int rc; rc = lsm_blob_alloc(sec, blob_sizes.lbs_ib, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(ib_alloc_security, *sec); if (rc) { kfree(*sec); *sec = NULL; } return rc; } EXPORT_SYMBOL(security_ib_alloc_security); /** * security_ib_free_security() - Free an Infiniband LSM blob * @sec: LSM blob * * Deallocate an Infiniband security structure. */ void security_ib_free_security(void *sec) { kfree(sec); } EXPORT_SYMBOL(security_ib_free_security); #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM /** * security_xfrm_policy_alloc() - Allocate a xfrm policy LSM blob * @ctxp: xfrm security context being added to the SPD * @sec_ctx: security label provided by userspace * @gfp: gfp flags * * Allocate a security structure to the xp->security field; the security field * is initialized to NULL when the xfrm_policy is allocated. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) { return call_int_hook(xfrm_policy_alloc_security, ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); /** * security_xfrm_policy_clone() - Clone xfrm policy LSM state * @old_ctx: xfrm security context * @new_ctxp: target xfrm security context * * Allocate a security structure in new_ctxp that contains the information from * the old_ctx structure. * * Return: Return 0 if operation was successful. */ int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { return call_int_hook(xfrm_policy_clone_security, old_ctx, new_ctxp); } /** * security_xfrm_policy_free() - Free a xfrm security context * @ctx: xfrm security context * * Free LSM resources associated with @ctx. */ void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { call_void_hook(xfrm_policy_free_security, ctx); } EXPORT_SYMBOL(security_xfrm_policy_free); /** * security_xfrm_policy_delete() - Check if deleting a xfrm policy is allowed * @ctx: xfrm security context * * Authorize deletion of a SPD entry. * * Return: Returns 0 if permission is granted. */ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { return call_int_hook(xfrm_policy_delete_security, ctx); } /** * security_xfrm_state_alloc() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @sec_ctx: security label provided by userspace * * Allocate a security structure to the @x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to @sec_ctx. * * Return: Return 0 if operation was successful. 
*/ int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) { return call_int_hook(xfrm_state_alloc, x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); /** * security_xfrm_state_alloc_acquire() - Allocate a xfrm state LSM blob * @x: xfrm state being added to the SAD * @polsec: associated policy's security context * @secid: secid from the flow * * Allocate a security structure to the x->security field; the security field * is initialized to NULL when the xfrm_state is allocated. Set the context to * correspond to secid. * * Return: Returns 0 if operation was successful. */ int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { return call_int_hook(xfrm_state_alloc_acquire, x, polsec, secid); } /** * security_xfrm_state_delete() - Check if deleting a xfrm state is allowed * @x: xfrm state * * Authorize deletion of x->security. * * Return: Returns 0 if permission is granted. */ int security_xfrm_state_delete(struct xfrm_state *x) { return call_int_hook(xfrm_state_delete_security, x); } EXPORT_SYMBOL(security_xfrm_state_delete); /** * security_xfrm_state_free() - Free a xfrm state * @x: xfrm state * * Deallocate x->security. */ void security_xfrm_state_free(struct xfrm_state *x) { call_void_hook(xfrm_state_free_security, x); } /** * security_xfrm_policy_lookup() - Check if using a xfrm policy is allowed * @ctx: target xfrm security context * @fl_secid: flow secid used to authorize access * * Check permission when a flow selects a xfrm_policy for processing XFRMs on a * packet. The hook is called when selecting either a per-socket policy or a * generic xfrm policy. * * Return: Return 0 if permission is granted, -ESRCH otherwise, or -errno on * other errors. */ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid) { return call_int_hook(xfrm_policy_lookup, ctx, fl_secid); } /** * security_xfrm_state_pol_flow_match() - Check for a xfrm match * @x: xfrm state to match * @xp: xfrm policy to check for a match * @flic: flow to check for a match. * * Check @xp and @flic for a match with @x. * * Return: Returns 1 if there is a match. */ int security_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) { struct lsm_static_call *scall; int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match); /* * Since this function is expected to return 0 or 1, the judgment * becomes difficult if multiple LSMs supply this call. Fortunately, * we can use the first LSM's judgment because currently only SELinux * supplies this call. * * For speed optimization, we explicitly break the loop rather than * using the macro */ lsm_for_each_hook(scall, xfrm_state_pol_flow_match) { rc = scall->hl->hook.xfrm_state_pol_flow_match(x, xp, flic); break; } return rc; } /** * security_xfrm_decode_session() - Determine the xfrm secid for a packet * @skb: xfrm packet * @secid: secid * * Decode the packet in @skb and return the security label in @secid. * * Return: Return 0 if all xfrms used have the same secid. 
*/ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) { return call_int_hook(xfrm_decode_session, skb, secid, 1); } void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic) { int rc = call_int_hook(xfrm_decode_session, skb, &flic->flowic_secid, 0); BUG_ON(rc); } EXPORT_SYMBOL(security_skb_classify_flow); #endif /* CONFIG_SECURITY_NETWORK_XFRM */ #ifdef CONFIG_KEYS /** * security_key_alloc() - Allocate and initialize a kernel key LSM blob * @key: key * @cred: credentials * @flags: allocation flags * * Permit allocation of a key and assign security data. Note that key does not * have a serial number assigned at this point. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { int rc = lsm_key_alloc(key); if (unlikely(rc)) return rc; rc = call_int_hook(key_alloc, key, cred, flags); if (unlikely(rc)) security_key_free(key); return rc; } /** * security_key_free() - Free a kernel key LSM blob * @key: key * * Notification of destruction; free security data. */ void security_key_free(struct key *key) { kfree(key->security); key->security = NULL; } /** * security_key_permission() - Check if a kernel key operation is allowed * @key_ref: key reference * @cred: credentials of actor requesting access * @need_perm: requested permissions * * See whether a specific operational right is granted to a process on a key. * * Return: Return 0 if permission is granted, -ve error otherwise. */ int security_key_permission(key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) { return call_int_hook(key_permission, key_ref, cred, need_perm); } /** * security_key_getsecurity() - Get the key's security label * @key: key * @buffer: security label buffer * * Get a textual representation of the security context attached to a key for * the purposes of honouring KEYCTL_GETSECURITY. This function allocates the * storage for the NUL-terminated string and the caller should free it. * * Return: Returns the length of @buffer (including terminating NUL) or -ve if * an error occurs. May also return 0 (and a NULL buffer pointer) if * there is no security label assigned to the key. */ int security_key_getsecurity(struct key *key, char **buffer) { *buffer = NULL; return call_int_hook(key_getsecurity, key, buffer); } /** * security_key_post_create_or_update() - Notification of key create or update * @keyring: keyring to which the key is linked to * @key: created or updated key * @payload: data used to instantiate or update the key * @payload_len: length of payload * @flags: key flags * @create: flag indicating whether the key was created or updated * * Notify the caller of a key creation or update. */ void security_key_post_create_or_update(struct key *keyring, struct key *key, const void *payload, size_t payload_len, unsigned long flags, bool create) { call_void_hook(key_post_create_or_update, keyring, key, payload, payload_len, flags, create); } #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT /** * security_audit_rule_init() - Allocate and init an LSM audit rule struct * @field: audit action * @op: rule operator * @rulestr: rule context * @lsmrule: receive buffer for audit rule struct * @gfp: GFP flag used for kmalloc * * Allocate and initialize an LSM audit rule structure. * * Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of * an invalid rule. 
*/ int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) { return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp); } /** * security_audit_rule_known() - Check if an audit rule contains LSM fields * @krule: audit rule * * Specifies whether given @krule contains any fields related to the current * LSM. * * Return: Returns 1 in case of relation found, 0 otherwise. */ int security_audit_rule_known(struct audit_krule *krule) { return call_int_hook(audit_rule_known, krule); } /** * security_audit_rule_free() - Free an LSM audit rule struct * @lsmrule: audit rule struct * * Deallocate the LSM audit rule structure previously allocated by * audit_rule_init(). */ void security_audit_rule_free(void *lsmrule) { call_void_hook(audit_rule_free, lsmrule); } /** * security_audit_rule_match() - Check if a label matches an audit rule * @prop: security label * @field: LSM audit field * @op: matching operator * @lsmrule: audit rule * * Determine if given @secid matches a rule previously approved by * security_audit_rule_known(). * * Return: Returns 1 if secid matches the rule, 0 if it does not, -ERRNO on * failure. */ int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, void *lsmrule) { return call_int_hook(audit_rule_match, prop, field, op, lsmrule); } #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL /** * security_bpf() - Check if the bpf syscall operation is allowed * @cmd: command * @attr: bpf attribute * @size: size * @kernel: whether or not call originated from kernel * * Do a initial check for all bpf syscalls after the attribute is copied into * the kernel. The actual security module can implement their own rules to * check the specific cmd they need. * * Return: Returns 0 if permission is granted. */ int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel) { return call_int_hook(bpf, cmd, attr, size, kernel); } /** * security_bpf_map() - Check if access to a bpf map is allowed * @map: bpf map * @fmode: mode * * Do a check when the kernel generates and returns a file descriptor for eBPF * maps. * * Return: Returns 0 if permission is granted. */ int security_bpf_map(struct bpf_map *map, fmode_t fmode) { return call_int_hook(bpf_map, map, fmode); } /** * security_bpf_prog() - Check if access to a bpf program is allowed * @prog: bpf program * * Do a check when the kernel generates and returns a file descriptor for eBPF * programs. * * Return: Returns 0 if permission is granted. */ int security_bpf_prog(struct bpf_prog *prog) { return call_int_hook(bpf_prog, prog); } /** * security_bpf_map_create() - Check if BPF map creation is allowed * @map: BPF map object * @attr: BPF syscall attributes used to create BPF map * @token: BPF token used to grant user access * @kernel: whether or not call originated from kernel * * Do a check when the kernel creates a new BPF map. This is also the * point where LSM blob is allocated for LSMs that need them. * * Return: Returns 0 on success, error on failure. 
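 *
 * Illustrative sketch only (userspace side, not part of this hook): a
 * minimal bpf(2) BPF_MAP_CREATE request of the kind that ends up at this
 * check, written against the raw syscall rather than any helper library:
 *
 *	union bpf_attr attr = {
 *		.map_type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = sizeof(__u32),
 *		.value_size = sizeof(__u64),
 *		.max_entries = 1,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));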
*/ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr, struct bpf_token *token, bool kernel) { return call_int_hook(bpf_map_create, map, attr, token, kernel); } /** * security_bpf_prog_load() - Check if loading of BPF program is allowed * @prog: BPF program object * @attr: BPF syscall attributes used to create BPF program * @token: BPF token used to grant user access to BPF subsystem * @kernel: whether or not call originated from kernel * * Perform an access control check when the kernel loads a BPF program and * allocates associated BPF program object. This hook is also responsible for * allocating any required LSM state for the BPF program. * * Return: Returns 0 on success, error on failure. */ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token, bool kernel) { return call_int_hook(bpf_prog_load, prog, attr, token, kernel); } /** * security_bpf_token_create() - Check if creating of BPF token is allowed * @token: BPF token object * @attr: BPF syscall attributes used to create BPF token * @path: path pointing to BPF FS mount point from which BPF token is created * * Do a check when the kernel instantiates a new BPF token object from BPF FS * instance. This is also the point where LSM blob can be allocated for LSMs. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, const struct path *path) { return call_int_hook(bpf_token_create, token, attr, path); } /** * security_bpf_token_cmd() - Check if BPF token is allowed to delegate * requested BPF syscall command * @token: BPF token object * @cmd: BPF syscall command requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF syscall command. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd) { return call_int_hook(bpf_token_cmd, token, cmd); } /** * security_bpf_token_capable() - Check if BPF token is allowed to delegate * requested BPF-related capability * @token: BPF token object * @cap: capabilities requested to be delegated by BPF token * * Do a check when the kernel decides whether provided BPF token should allow * delegation of requested BPF-related capabilities. * * Return: Returns 0 on success, error on failure. */ int security_bpf_token_capable(const struct bpf_token *token, int cap) { return call_int_hook(bpf_token_capable, token, cap); } /** * security_bpf_map_free() - Free a bpf map's LSM blob * @map: bpf map * * Clean up the security information stored inside bpf map. */ void security_bpf_map_free(struct bpf_map *map) { call_void_hook(bpf_map_free, map); } /** * security_bpf_prog_free() - Free a BPF program's LSM blob * @prog: BPF program struct * * Clean up the security information stored inside BPF program. */ void security_bpf_prog_free(struct bpf_prog *prog) { call_void_hook(bpf_prog_free, prog); } /** * security_bpf_token_free() - Free a BPF token's LSM blob * @token: BPF token struct * * Clean up the security information stored inside BPF token. */ void security_bpf_token_free(struct bpf_token *token) { call_void_hook(bpf_token_free, token); } #endif /* CONFIG_BPF_SYSCALL */ /** * security_locked_down() - Check if a kernel feature is allowed * @what: requested kernel feature * * Determine whether a kernel feature that potentially enables arbitrary code * execution in kernel space should be permitted. 
* * Return: Returns 0 if permission is granted. */ int security_locked_down(enum lockdown_reason what) { return call_int_hook(locked_down, what); } EXPORT_SYMBOL(security_locked_down); /** * security_bdev_alloc() - Allocate a block device LSM blob * @bdev: block device * * Allocate and attach a security structure to @bdev->bd_security. The * security field is initialized to NULL when the bdev structure is * allocated. * * Return: Return 0 if operation was successful. */ int security_bdev_alloc(struct block_device *bdev) { int rc = 0; rc = lsm_bdev_alloc(bdev); if (unlikely(rc)) return rc; rc = call_int_hook(bdev_alloc_security, bdev); if (unlikely(rc)) security_bdev_free(bdev); return rc; } EXPORT_SYMBOL(security_bdev_alloc); /** * security_bdev_free() - Free a block device's LSM blob * @bdev: block device * * Deallocate the bdev security structure and set @bdev->bd_security to NULL. */ void security_bdev_free(struct block_device *bdev) { if (!bdev->bd_security) return; call_void_hook(bdev_free_security, bdev); kfree(bdev->bd_security); bdev->bd_security = NULL; } EXPORT_SYMBOL(security_bdev_free); /** * security_bdev_setintegrity() - Set the device's integrity data * @bdev: block device * @type: type of integrity, e.g. hash digest, signature, etc * @value: the integrity value * @size: size of the integrity value * * Register a verified integrity measurement of a bdev with LSMs. * LSMs should free the previously saved data if @value is NULL. * Please note that the new hook should be invoked every time the security * information is updated to keep these data current. For example, in dm-verity, * if the mapping table is reloaded and configured to use a different dm-verity * target with a new roothash and signing information, the previously stored * data in the LSM blob will become obsolete. It is crucial to re-invoke the * hook to refresh these data and ensure they are up to date. This necessity * arises from the design of device-mapper, where a device-mapper device is * first created, and then targets are subsequently loaded into it. These * targets can be modified multiple times during the device's lifetime. * Therefore, while the LSM blob is allocated during the creation of the block * device, its actual contents are not initialized at this stage and can change * substantially over time. This includes alterations from data that the LSMs * 'trusts' to those they do not, making it essential to handle these changes * correctly. Failure to address this dynamic aspect could potentially allow * for bypassing LSM checks. * * Return: Returns 0 on success, negative values on failure. */ int security_bdev_setintegrity(struct block_device *bdev, enum lsm_integrity_type type, const void *value, size_t size) { return call_int_hook(bdev_setintegrity, bdev, type, value, size); } EXPORT_SYMBOL(security_bdev_setintegrity); #ifdef CONFIG_PERF_EVENTS /** * security_perf_event_open() - Check if a perf event open is allowed * @type: type of event * * Check whether the @type of perf_event_open syscall is allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_open(int type) { return call_int_hook(perf_event_open, type); } /** * security_perf_event_alloc() - Allocate a perf event LSM blob * @event: perf event * * Allocate and save perf_event security info. * * Return: Returns 0 on success, error on failure. 
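 *
 * Illustrative sketch only (userspace side, not part of this hook): the
 * kind of perf_event_open(2) call whose handling allocates a perf event,
 * and with it the LSM blob set up below:
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);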
*/ int security_perf_event_alloc(struct perf_event *event) { int rc; rc = lsm_blob_alloc(&event->security, blob_sizes.lbs_perf_event, GFP_KERNEL); if (rc) return rc; rc = call_int_hook(perf_event_alloc, event); if (rc) { kfree(event->security); event->security = NULL; } return rc; } /** * security_perf_event_free() - Free a perf event LSM blob * @event: perf event * * Release (free) perf_event security info. */ void security_perf_event_free(struct perf_event *event) { kfree(event->security); event->security = NULL; } /** * security_perf_event_read() - Check if reading a perf event label is allowed * @event: perf event * * Read perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_read(struct perf_event *event) { return call_int_hook(perf_event_read, event); } /** * security_perf_event_write() - Check if writing a perf event label is allowed * @event: perf event * * Write perf_event security info if allowed. * * Return: Returns 0 if permission is granted. */ int security_perf_event_write(struct perf_event *event) { return call_int_hook(perf_event_write, event); } #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_IO_URING /** * security_uring_override_creds() - Check if overriding creds is allowed * @new: new credentials * * Check if the current task, executing an io_uring operation, is allowed to * override it's credentials with @new. * * Return: Returns 0 if permission is granted. */ int security_uring_override_creds(const struct cred *new) { return call_int_hook(uring_override_creds, new); } /** * security_uring_sqpoll() - Check if IORING_SETUP_SQPOLL is allowed * * Check whether the current task is allowed to spawn a io_uring polling thread * (IORING_SETUP_SQPOLL). * * Return: Returns 0 if permission is granted. */ int security_uring_sqpoll(void) { return call_int_hook(uring_sqpoll); } /** * security_uring_cmd() - Check if a io_uring passthrough command is allowed * @ioucmd: command * * Check whether the file_operations uring_cmd is allowed to run. * * Return: Returns 0 if permission is granted. */ int security_uring_cmd(struct io_uring_cmd *ioucmd) { return call_int_hook(uring_cmd, ioucmd); } /** * security_uring_allowed() - Check if io_uring_setup() is allowed * * Check whether the current task is allowed to call io_uring_setup(). * * Return: Returns 0 if permission is granted. */ int security_uring_allowed(void) { return call_int_hook(uring_allowed); } #endif /* CONFIG_IO_URING */ /** * security_initramfs_populated() - Notify LSMs that initramfs has been loaded * * Tells the LSMs the initramfs has been unpacked into the rootfs. */ void security_initramfs_populated(void) { call_void_hook(initramfs_populated); } |
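/*
 * Editor's illustrative sketch (not kernel code): the wrappers above share
 * one dispatch pattern: walk every registered LSM's implementation of a hook
 * and stop at the first one that objects. This is a simplified standalone
 * model of that "bail on first non-zero return" behaviour; all names here
 * (demo_hook, demo_call_int_hook, the sample LSMs) are invented for the demo.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_hook {
	const char *lsm_name;
	int (*inode_check)(int inode_id);	/* one hook slot per LSM */
};

/* Walk the list in registration order; the first non-zero return wins. */
static int demo_call_int_hook(const struct demo_hook *hooks, size_t n,
			      int inode_id)
{
	int rc = 0;	/* default: permission granted */

	for (size_t i = 0; i < n; i++) {
		rc = hooks[i].inode_check(inode_id);
		if (rc != 0)
			break;
	}
	return rc;
}

static int allow_all(int inode_id) { (void)inode_id; return 0; }
static int deny_odd(int inode_id)  { return (inode_id & 1) ? -13 : 0; } /* -EACCES */

int main(void)
{
	const struct demo_hook hooks[] = {
		{ "first_lsm",  allow_all },
		{ "second_lsm", deny_odd  },
	};

	printf("inode 2 -> %d\n", demo_call_int_hook(hooks, 2, 2)); /* 0, both allow */
	printf("inode 3 -> %d\n", demo_call_int_hook(hooks, 2, 3)); /* -13, second LSM denies */
	return 0;
}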
| 2578 3 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | /* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/linux/eventpoll.h ( Efficient event polling implementation ) * Copyright (C) 2001,...,2006 Davide Libenzi * * Davide Libenzi <davidel@xmailserver.org> */ #ifndef _LINUX_EVENTPOLL_H #define _LINUX_EVENTPOLL_H #include <uapi/linux/eventpoll.h> #include <uapi/linux/kcmp.h> /* Forward declarations to avoid compiler errors */ struct file; #ifdef CONFIG_EPOLL #ifdef CONFIG_KCMP struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long toff); #endif /* Used to release the epoll bits inside the "struct file" */ void eventpoll_release_file(struct file *file); /* Copy ready events to userspace */ int epoll_sendevents(struct file *file, struct epoll_event __user *events, int maxevents); /* * This is called from inside fs/file_table.c:__fput() to unlink files * from the eventpoll interface. We need to have this facility to cleanup * correctly files that are closed without being removed from the eventpoll * interface. */ static inline void eventpoll_release(struct file *file) { /* * Fast check to avoid the get/release of the semaphore. Since * we're doing this outside the semaphore lock, it might return * false negatives, but we don't care. It'll help in 99.99% of cases * to avoid the semaphore lock. False positives simply cannot happen * because the file in on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file. */ if (likely(!READ_ONCE(file->f_ep))) return; /* * The file is being closed while it is still linked to an epoll * descriptor. We need to handle this by correctly unlinking it * from its containers. */ eventpoll_release_file(file); } int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, bool nonblock); /* Tells if the epoll_ctl(2) operation needs an event copy from userspace */ static inline int ep_op_has_event(int op) { return op != EPOLL_CTL_DEL; } #else static inline void eventpoll_release(struct file *file) {} #endif #if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT) /* ARM OABI has an incompatible struct layout and needs a special handler */ extern struct epoll_event __user * epoll_put_uevent(__poll_t revents, __u64 data, struct epoll_event __user *uevent); #else static inline struct epoll_event __user * epoll_put_uevent(__poll_t revents, __u64 data, struct epoll_event __user *uevent) { if (__put_user(revents, &uevent->events) || __put_user(data, &uevent->data)) return NULL; return uevent+1; } #endif #endif /* #ifndef _LINUX_EVENTPOLL_H */ |
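/*
 * Editor's illustrative sketch (not part of the header above): the userspace
 * view of this machinery. One epoll instance watches one pipe, which is the
 * path that ultimately exercises epoll_ctl()/epoll_wait() in the kernel and
 * the uevent copy-out helpers declared above. Error handling is deliberately
 * minimal to keep the example short.
 */
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int epfd;
	struct epoll_event ev, ready;

	if (pipe(pipefd) < 0)
		return 1;

	epfd = epoll_create1(0);		/* new epoll instance */
	if (epfd < 0)
		return 1;

	ev.events = EPOLLIN;			/* interested in readability */
	ev.data.fd = pipefd[0];
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0)
		return 1;

	if (write(pipefd[1], "x", 1) != 1)	/* make the read end ready */
		return 1;

	/* One ready event is copied back to us via the kernel's uevent helpers. */
	if (epoll_wait(epfd, &ready, 1, 1000) == 1 && (ready.events & EPOLLIN))
		printf("fd %d is readable\n", ready.data.fd);

	close(epfd);
	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}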
| 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2016 Qualcomm Atheros, Inc * * Based on net/sched/sch_fq_codel.c */ #ifndef __NET_SCHED_FQ_IMPL_H #define __NET_SCHED_FQ_IMPL_H #include <net/fq.h> /* functions that are embedded into includer */ static void __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, unsigned int bytes, unsigned int truesize) { struct fq_tin *tin = flow->tin; int idx; tin->backlog_bytes -= bytes; tin->backlog_packets -= packets; flow->backlog -= bytes; fq->backlog -= packets; fq->memory_usage -= truesize; if (flow->backlog) return; if (flow == &tin->default_flow) { list_del_init(&tin->tin_list); return; } idx = flow - fq->flows; __clear_bit(idx, fq->flows_bitmap); } static void fq_adjust_removal(struct fq *fq, struct fq_flow *flow, struct sk_buff *skb) { __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); } static struct sk_buff *fq_flow_dequeue(struct fq *fq, struct fq_flow *flow) { struct sk_buff *skb; lockdep_assert_held(&fq->lock); skb = __skb_dequeue(&flow->queue); if (!skb) return NULL; fq_adjust_removal(fq, flow, skb); return skb; } static int fq_flow_drop(struct fq *fq, struct fq_flow *flow, fq_skb_free_t free_func) { unsigned int packets = 0, bytes = 0, truesize = 0; struct fq_tin *tin = flow->tin; struct sk_buff *skb; int pending; lockdep_assert_held(&fq->lock); pending = min_t(int, 32, skb_queue_len(&flow->queue) / 2); do { skb = __skb_dequeue(&flow->queue); if (!skb) break; packets++; bytes += skb->len; truesize += skb->truesize; free_func(fq, tin, flow, skb); } while (packets < pending); __fq_adjust_removal(fq, flow, packets, bytes, truesize); return packets; } static struct sk_buff *fq_tin_dequeue(struct fq *fq, struct fq_tin *tin, fq_tin_dequeue_t dequeue_func) { struct fq_flow *flow; struct list_head *head; struct sk_buff *skb; lockdep_assert_held(&fq->lock); begin: head = &tin->new_flows; if (list_empty(head)) { head = &tin->old_flows; if (list_empty(head)) return NULL; } flow = list_first_entry(head, struct fq_flow, flowchain); if (flow->deficit <= 0) { 
flow->deficit += fq->quantum; list_move_tail(&flow->flowchain, &tin->old_flows); goto begin; } skb = dequeue_func(fq, tin, flow); if (!skb) { /* force a pass through old_flows to prevent starvation */ if ((head == &tin->new_flows) && !list_empty(&tin->old_flows)) { list_move_tail(&flow->flowchain, &tin->old_flows); } else { list_del_init(&flow->flowchain); flow->tin = NULL; } goto begin; } flow->deficit -= skb->len; tin->tx_bytes += skb->len; tin->tx_packets++; return skb; } static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) { u32 hash = skb_get_hash(skb); return reciprocal_scale(hash, fq->flows_cnt); } static struct fq_flow *fq_flow_classify(struct fq *fq, struct fq_tin *tin, u32 idx, struct sk_buff *skb) { struct fq_flow *flow; lockdep_assert_held(&fq->lock); flow = &fq->flows[idx]; if (flow->tin && flow->tin != tin) { flow = &tin->default_flow; tin->collisions++; fq->collisions++; } if (!flow->tin) tin->flows++; return flow; } static struct fq_flow *fq_find_fattest_flow(struct fq *fq) { struct fq_tin *tin; struct fq_flow *flow = NULL; u32 len = 0; int i; for_each_set_bit(i, fq->flows_bitmap, fq->flows_cnt) { struct fq_flow *cur = &fq->flows[i]; unsigned int cur_len; cur_len = cur->backlog; if (cur_len <= len) continue; flow = cur; len = cur_len; } list_for_each_entry(tin, &fq->tin_backlog, tin_list) { unsigned int cur_len = tin->default_flow.backlog; if (cur_len <= len) continue; flow = &tin->default_flow; len = cur_len; } return flow; } static void fq_tin_enqueue(struct fq *fq, struct fq_tin *tin, u32 idx, struct sk_buff *skb, fq_skb_free_t free_func) { struct fq_flow *flow; struct sk_buff *next; bool oom; lockdep_assert_held(&fq->lock); flow = fq_flow_classify(fq, tin, idx, skb); if (!flow->backlog) { if (flow != &tin->default_flow) __set_bit(idx, fq->flows_bitmap); else if (list_empty(&tin->tin_list)) list_add(&tin->tin_list, &fq->tin_backlog); } flow->tin = tin; skb_list_walk_safe(skb, skb, next) { skb_mark_not_on_list(skb); flow->backlog += skb->len; tin->backlog_bytes += skb->len; tin->backlog_packets++; fq->memory_usage += skb->truesize; fq->backlog++; __skb_queue_tail(&flow->queue, skb); } if (list_empty(&flow->flowchain)) { flow->deficit = fq->quantum; list_add_tail(&flow->flowchain, &tin->new_flows); } oom = (fq->memory_usage > fq->memory_limit); while (fq->backlog > fq->limit || oom) { flow = fq_find_fattest_flow(fq); if (!flow) return; if (!fq_flow_drop(fq, flow, free_func)) return; flow->tin->overlimit++; fq->overlimit++; if (oom) { fq->overmemory++; oom = (fq->memory_usage > fq->memory_limit); } } } static void fq_flow_filter(struct fq *fq, struct fq_flow *flow, fq_skb_filter_t filter_func, void *filter_data, fq_skb_free_t free_func) { struct fq_tin *tin = flow->tin; struct sk_buff *skb, *tmp; lockdep_assert_held(&fq->lock); skb_queue_walk_safe(&flow->queue, skb, tmp) { if (!filter_func(fq, tin, flow, skb, filter_data)) continue; __skb_unlink(skb, &flow->queue); fq_adjust_removal(fq, flow, skb); free_func(fq, tin, flow, skb); } } static void fq_tin_filter(struct fq *fq, struct fq_tin *tin, fq_skb_filter_t filter_func, void *filter_data, fq_skb_free_t free_func) { struct fq_flow *flow; lockdep_assert_held(&fq->lock); list_for_each_entry(flow, &tin->new_flows, flowchain) fq_flow_filter(fq, flow, filter_func, filter_data, free_func); list_for_each_entry(flow, &tin->old_flows, flowchain) fq_flow_filter(fq, flow, filter_func, filter_data, free_func); } static void fq_flow_reset(struct fq *fq, struct fq_flow *flow, fq_skb_free_t free_func) { struct fq_tin *tin = 
flow->tin; struct sk_buff *skb; while ((skb = fq_flow_dequeue(fq, flow))) free_func(fq, tin, flow, skb); if (!list_empty(&flow->flowchain)) { list_del_init(&flow->flowchain); if (list_empty(&tin->new_flows) && list_empty(&tin->old_flows)) list_del_init(&tin->tin_list); } flow->tin = NULL; WARN_ON_ONCE(flow->backlog); } static void fq_tin_reset(struct fq *fq, struct fq_tin *tin, fq_skb_free_t free_func) { struct list_head *head; struct fq_flow *flow; for (;;) { head = &tin->new_flows; if (list_empty(head)) { head = &tin->old_flows; if (list_empty(head)) break; } flow = list_first_entry(head, struct fq_flow, flowchain); fq_flow_reset(fq, flow, free_func); } WARN_ON_ONCE(!list_empty(&tin->tin_list)); WARN_ON_ONCE(tin->backlog_bytes); WARN_ON_ONCE(tin->backlog_packets); } static void fq_flow_init(struct fq_flow *flow) { INIT_LIST_HEAD(&flow->flowchain); __skb_queue_head_init(&flow->queue); } static void fq_tin_init(struct fq_tin *tin) { INIT_LIST_HEAD(&tin->new_flows); INIT_LIST_HEAD(&tin->old_flows); INIT_LIST_HEAD(&tin->tin_list); fq_flow_init(&tin->default_flow); } static int fq_init(struct fq *fq, int flows_cnt) { int i; memset(fq, 0, sizeof(fq[0])); spin_lock_init(&fq->lock); INIT_LIST_HEAD(&fq->tin_backlog); fq->flows_cnt = max_t(u32, flows_cnt, 1); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); if (!fq->flows) return -ENOMEM; fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL); if (!fq->flows_bitmap) { kvfree(fq->flows); fq->flows = NULL; return -ENOMEM; } for (i = 0; i < fq->flows_cnt; i++) fq_flow_init(&fq->flows[i]); return 0; } static void fq_reset(struct fq *fq, fq_skb_free_t free_func) { int i; for (i = 0; i < fq->flows_cnt; i++) fq_flow_reset(fq, &fq->flows[i], free_func); kvfree(fq->flows); fq->flows = NULL; bitmap_free(fq->flows_bitmap); fq->flows_bitmap = NULL; } #endif |
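These helpers are deliberately static and header-only so a consumer can embed them (the header comment above calls this "embedded into includer"). Purely as a hypothetical sketch of how such a consumer wires them up — the fq_skb_free_t and fq_tin_dequeue_t typedefs live in net/fq.h and their shapes are inferred from the call sites above, and every example_* name below is invented — enqueue and dequeue might look like this, after fq_init() and fq_tin_init() have run:

#include <net/fq.h>
#include <net/fq_impl.h>

/* free callback: invoked for packets dropped under memory/backlog pressure */
static void example_skb_free(struct fq *fq, struct fq_tin *tin,
			     struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

/* per-flow dequeue hook; a real user (e.g. one adding CoDel) would do more */
static struct sk_buff *example_dequeue(struct fq *fq, struct fq_tin *tin,
				       struct fq_flow *flow)
{
	return fq_flow_dequeue(fq, flow);
}

/* enqueue path: hash the skb to a flow index, then queue under fq->lock */
static void example_tx(struct fq *fq, struct fq_tin *tin, struct sk_buff *skb)
{
	u32 idx = fq_flow_idx(fq, skb);

	spin_lock_bh(&fq->lock);
	fq_tin_enqueue(fq, tin, idx, skb, example_skb_free);
	spin_unlock_bh(&fq->lock);
}

/* dequeue path: DRR over new_flows/old_flows; NULL once the tin is empty */
static struct sk_buff *example_pull(struct fq *fq, struct fq_tin *tin)
{
	struct sk_buff *skb;

	spin_lock_bh(&fq->lock);
	skb = fq_tin_dequeue(fq, tin, example_dequeue);
	spin_unlock_bh(&fq->lock);

	return skb;
}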
| 4 2 1488 1490 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 | // SPDX-License-Identifier: GPL-2.0-only /* Common code for 32 and 64-bit NUMA */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/string.h> #include <linux/init.h> #include <linux/memblock.h> #include <linux/mmzone.h> #include <linux/ctype.h> #include <linux/nodemask.h> #include <linux/sched.h> #include <linux/topology.h> #include <linux/sort.h> #include <linux/numa_memblks.h> #include <asm/e820/api.h> #include <asm/proto.h> #include <asm/dma.h> #include <asm/numa.h> #include <asm/amd/nb.h> #include "mm_internal.h" int numa_off; static __init int numa_setup(char *opt) { if (!opt) return -EINVAL; if (!strncmp(opt, "off", 3)) numa_off = 1; if (!strncmp(opt, "fake=", 5)) return numa_emu_cmdline(opt + 5); if (!strncmp(opt, "noacpi", 6)) disable_srat(); if (!strncmp(opt, "nohmat", 6)) disable_hmat(); return 0; } early_param("numa", numa_setup); /* * apicid, cpu, node mappings */ s16 __apicid_to_node[MAX_LOCAL_APIC] = { [0 ... 
MAX_LOCAL_APIC-1] = NUMA_NO_NODE }; int numa_cpu_node(int cpu) { u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); if (apicid != BAD_APICID) return __apicid_to_node[apicid]; return NUMA_NO_NODE; } cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); /* * Map cpu index to node index */ DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE); EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map); void numa_set_node(int cpu, int node) { int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map); /* early setting, no percpu area yet */ if (cpu_to_node_map) { cpu_to_node_map[cpu] = node; return; } #ifdef CONFIG_DEBUG_PER_CPU_MAPS if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) { printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu); dump_stack(); return; } #endif per_cpu(x86_cpu_to_node_map, cpu) = node; set_cpu_numa_node(cpu, node); } void numa_clear_node(int cpu) { numa_set_node(cpu, NUMA_NO_NODE); } /* * Allocate node_to_cpumask_map based on number of available nodes * Requires node_possible_map to be valid. * * Note: cpumask_of_node() is not valid until after this is done. * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) */ void __init setup_node_to_cpumask_map(void) { unsigned int node; /* setup nr_node_ids if not done yet */ if (nr_node_ids == MAX_NUMNODES) setup_nr_node_ids(); /* allocate the map */ for (node = 0; node < nr_node_ids; node++) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init numa_register_nodes(void) { int nid; if (!memblock_validate_numa_coverage(SZ_1M)) return -EINVAL; /* Finally register nodes. */ for_each_node_mask(nid, node_possible_map) { unsigned long start_pfn, end_pfn; /* * Note, get_pfn_range_for_nid() depends on * memblock_set_node() having already happened */ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); if (start_pfn >= end_pfn) continue; alloc_node_data(nid); node_set_online(nid); } /* Dump memblock with node info and return. */ memblock_dump_all(); return 0; } /* * There are unfortunately some poorly designed mainboards around that * only connect memory to a single CPU. This breaks the 1:1 cpu->node * mapping. To avoid this fill in the mapping for all possible CPUs, * as the number of CPUs is not known yet. We round robin the existing * nodes. */ static void __init numa_init_array(void) { int rr, i; rr = first_node(node_online_map); for (i = 0; i < nr_cpu_ids; i++) { if (early_cpu_to_node(i) != NUMA_NO_NODE) continue; numa_set_node(i, rr); rr = next_node_in(rr, node_online_map); } } static int __init numa_init(int (*init_func)(void)) { int i; int ret; for (i = 0; i < MAX_LOCAL_APIC; i++) set_apicid_to_node(i, NUMA_NO_NODE); ret = numa_memblks_init(init_func, /* memblock_force_top_down */ true); if (ret < 0) return ret; ret = numa_register_nodes(); if (ret < 0) return ret; for (i = 0; i < nr_cpu_ids; i++) { int nid = early_cpu_to_node(i); if (nid == NUMA_NO_NODE) continue; if (!node_online(nid)) numa_clear_node(i); } numa_init_array(); return 0; } /** * dummy_numa_init - Fallback dummy NUMA init * * Used if there's no underlying NUMA architecture, NUMA initialization * fails, or NUMA is disabled on the command line. * * Must online at least one node and add memory blocks that cover all * allowed memory. This function must not fail. */ static int __init dummy_numa_init(void) { printk(KERN_INFO "%s\n", numa_off ? 
"NUMA turned off" : "No NUMA configuration found"); printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n", 0LLU, PFN_PHYS(max_pfn) - 1); node_set(0, numa_nodes_parsed); numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); return 0; } /** * x86_numa_init - Initialize NUMA * * Try each configured NUMA initialization method until one succeeds. The * last fallback is dummy single node config encompassing whole memory and * never fails. */ void __init x86_numa_init(void) { if (!numa_off) { #ifdef CONFIG_ACPI_NUMA if (!numa_init(x86_acpi_numa_init)) return; #endif #ifdef CONFIG_AMD_NUMA if (!numa_init(amd_numa_init)) return; #endif if (acpi_disabled && !numa_init(of_numa_init)) return; } numa_init(dummy_numa_init); } /* * A node may exist which has one or more Generic Initiators but no CPUs and no * memory. * * This function must be called after init_cpu_to_node(), to ensure that any * memoryless CPU nodes have already been brought online, and before the * node_data[nid] is needed for zone list setup in build_all_zonelists(). * * When this function is called, any nodes containing either memory and/or CPUs * will already be online and there is no need to do anything extra, even if * they also contain one or more Generic Initiators. */ void __init init_gi_nodes(void) { int nid; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ for_each_node_state(nid, N_GENERIC_INITIATOR) if (!node_online(nid)) node_set_online(nid); } /* * Setup early cpu_to_node. * * Populate cpu_to_node[] only if x86_cpu_to_apicid[], * and apicid_to_node[] tables have valid entries for a CPU. * This means we skip cpu_to_node[] initialisation for NUMA * emulation and faking node case (when running a kernel compiled * for NUMA on a non NUMA box), which is OK as cpu_to_node[] * is already initialized in a round robin manner at numa_init_array, * prior to this call, and this initialization is good enough * for the fake NUMA cases. * * Called before the per_cpu areas are setup. */ void __init init_cpu_to_node(void) { int cpu; u32 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid); BUG_ON(cpu_to_apicid == NULL); for_each_possible_cpu(cpu) { int node = numa_cpu_node(cpu); if (node == NUMA_NO_NODE) continue; /* * Exclude this node from * bringup_nonboot_cpus * cpu_up * __try_online_node * register_one_node * because node_subsys is not initialized yet. * TODO remove dependency on node_online */ if (!node_online(node)) node_set_online(node); numa_set_node(cpu, node); } } #ifndef CONFIG_DEBUG_PER_CPU_MAPS # ifndef CONFIG_NUMA_EMU void numa_add_cpu(unsigned int cpu) { cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } void numa_remove_cpu(unsigned int cpu) { cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); } # endif /* !CONFIG_NUMA_EMU */ #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ int __cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) { printk(KERN_WARNING "cpu_to_node(%d): usage too early!\n", cpu); dump_stack(); return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; } return per_cpu(x86_cpu_to_node_map, cpu); } EXPORT_SYMBOL(__cpu_to_node); /* * Same function as cpu_to_node() but used if called before the * per_cpu areas are setup. 
*/ int early_cpu_to_node(int cpu) { if (early_per_cpu_ptr(x86_cpu_to_node_map)) return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu]; if (!cpu_possible(cpu)) { printk(KERN_WARNING "early_cpu_to_node(%d): no per_cpu area!\n", cpu); dump_stack(); return NUMA_NO_NODE; } return per_cpu(x86_cpu_to_node_map, cpu); } void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable) { struct cpumask *mask; if (node == NUMA_NO_NODE) { /* early_cpu_to_node() already emits a warning and trace */ return; } mask = node_to_cpumask_map[node]; if (!cpumask_available(mask)) { pr_err("node_to_cpumask_map[%i] NULL\n", node); dump_stack(); return; } if (enable) cpumask_set_cpu(cpu, mask); else cpumask_clear_cpu(cpu, mask); printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n", enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, cpumask_pr_args(mask)); return; } # ifndef CONFIG_NUMA_EMU static void numa_set_cpumask(int cpu, bool enable) { debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); } void numa_add_cpu(unsigned int cpu) { numa_set_cpumask(cpu, true); } void numa_remove_cpu(unsigned int cpu) { numa_set_cpumask(cpu, false); } # endif /* !CONFIG_NUMA_EMU */ /* * Returns a pointer to the bitmask of CPUs on Node 'node'. */ const struct cpumask *cpumask_of_node(int node) { if ((unsigned)node >= nr_node_ids) { printk(KERN_WARNING "cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; } if (!cpumask_available(node_to_cpumask_map[node])) { printk(KERN_WARNING "cpumask_of_node(%d): no node_to_cpumask_map!\n", node); dump_stack(); return cpu_online_mask; } return node_to_cpumask_map[node]; } EXPORT_SYMBOL(cpumask_of_node); #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ #ifdef CONFIG_NUMA_EMU void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids) { int i, j; /* * Transform __apicid_to_node table to use emulated nids by * reverse-mapping phys_nid. The maps should always exist but fall * back to zero just in case. */ for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) { if (__apicid_to_node[i] == NUMA_NO_NODE) continue; for (j = 0; j < nr_emu_nids; j++) if (__apicid_to_node[i] == emu_nid_to_phys[j]) break; __apicid_to_node[i] = j < nr_emu_nids ? j : 0; } } u64 __init numa_emu_dma_end(void) { return PFN_PHYS(MAX_DMA32_PFN); } #endif /* CONFIG_NUMA_EMU */ |
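dummy_numa_init() above also doubles as a template for what any init_func handed to numa_init() must do: mark each parsed node in numa_nodes_parsed and describe its memory via numa_add_memblk(). A purely illustrative sketch (node count and memory split invented, not taken from the source) of a two-node backend:

static int __init example_numa_init(void)
{
	/* illustrative layout: node 0 owns the first 4 GiB, node 1 the rest */
	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, SZ_4G);

	node_set(1, numa_nodes_parsed);
	numa_add_memblk(1, SZ_4G, PFN_PHYS(max_pfn));

	return 0;
}

Such a backend would be invoked through numa_init(example_numa_init), just like the ACPI/AMD/OF backends in x86_numa_init(), with dummy_numa_init() remaining the never-failing fallback.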
| 14 14 2 12 14 14 14 14 14 14 14 14 14 14 14 14 14 14 14 5859 5851 14 5851 5859 442 13 14 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 | // SPDX-License-Identifier: GPL-2.0 /* * blk-mq scheduling framework * * Copyright (C) 2016 Jens Axboe */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/list_sort.h> #include <trace/events/block.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-mq-sched.h" #include "blk-wbt.h" /* * Mark a hardware queue as needing a restart. */ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) { if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) return; set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); } EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); /* * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch) * in blk_mq_run_hw_queue(). Its pair is the barrier in * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART, * meantime new request added to hctx->dispatch is missed to check in * blk_mq_run_hw_queue(). 
*/ smp_mb(); blk_mq_run_hw_queue(hctx, true); } static int sched_rq_cmp(void *priv, const struct list_head *a, const struct list_head *b) { struct request *rqa = container_of(a, struct request, queuelist); struct request *rqb = container_of(b, struct request, queuelist); return rqa->mq_hctx > rqb->mq_hctx; } static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list) { struct blk_mq_hw_ctx *hctx = list_first_entry(rq_list, struct request, queuelist)->mq_hctx; struct request *rq; LIST_HEAD(hctx_list); list_for_each_entry(rq, rq_list, queuelist) { if (rq->mq_hctx != hctx) { list_cut_before(&hctx_list, rq_list, &rq->queuelist); goto dispatch; } } list_splice_tail_init(rq_list, &hctx_list); dispatch: return blk_mq_dispatch_rq_list(hctx, &hctx_list, false); } #define BLK_MQ_BUDGET_DELAY 3 /* ms units */ /* * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to * restart queue if .get_budget() fails to get the budget. * * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to * be run again. This is necessary to avoid starving flushes. */ static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; struct elevator_queue *e = q->elevator; bool multi_hctxs = false, run_queue = false; bool dispatched = false, busy = false; unsigned int max_dispatch; LIST_HEAD(rq_list); int count = 0; if (hctx->dispatch_busy) max_dispatch = 1; else max_dispatch = hctx->queue->nr_requests; do { struct request *rq; int budget_token; if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) break; if (!list_empty_careful(&hctx->dispatch)) { busy = true; break; } budget_token = blk_mq_get_dispatch_budget(q); if (budget_token < 0) break; rq = e->type->ops.dispatch_request(hctx); if (!rq) { blk_mq_put_dispatch_budget(q, budget_token); /* * We're releasing without dispatching. Holding the * budget could have blocked any "hctx"s with the * same queue and if we didn't dispatch then there's * no guarantee anyone will kick the queue. Kick it * ourselves. */ run_queue = true; break; } blk_mq_set_rq_budget_token(rq, budget_token); /* * Now this rq owns the budget which has to be released * if this rq won't be queued to driver via .queue_rq() * in blk_mq_dispatch_rq_list(). */ list_add_tail(&rq->queuelist, &rq_list); count++; if (rq->mq_hctx != hctx) multi_hctxs = true; /* * If we cannot get tag for the request, stop dequeueing * requests from the IO scheduler. We are unlikely to be able * to submit them anyway and it creates false impression for * scheduling heuristics that the device can take more IO. */ if (!blk_mq_get_driver_tag(rq)) break; } while (count < max_dispatch); if (!count) { if (run_queue) blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); } else if (multi_hctxs) { /* * Requests from different hctx may be dequeued from some * schedulers, such as bfq and deadline. * * Sort the requests in the list according to their hctx, * dispatch batching requests from same hctx at a time. 
*/ list_sort(NULL, &rq_list, sched_rq_cmp); do { dispatched |= blk_mq_dispatch_hctx_list(&rq_list); } while (!list_empty(&rq_list)); } else { dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, false); } if (busy) return -EAGAIN; return !!dispatched; } static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx) { unsigned long end = jiffies + HZ; int ret; do { ret = __blk_mq_do_dispatch_sched(hctx); if (ret != 1) break; if (need_resched() || time_is_before_jiffies(end)) { blk_mq_delay_run_hw_queue(hctx, 0); break; } } while (1); return ret; } static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { unsigned short idx = ctx->index_hw[hctx->type]; if (++idx == hctx->nr_ctx) idx = 0; return hctx->ctxs[idx]; } /* * Only SCSI implements .get_budget and .put_budget, and SCSI restarts * its queue by itself in its completion handler, so we don't need to * restart queue if .get_budget() fails to get the budget. * * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to * be run again. This is necessary to avoid starving flushes. */ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; LIST_HEAD(rq_list); struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); int ret = 0; struct request *rq; do { int budget_token; if (!list_empty_careful(&hctx->dispatch)) { ret = -EAGAIN; break; } if (!sbitmap_any_bit_set(&hctx->ctx_map)) break; budget_token = blk_mq_get_dispatch_budget(q); if (budget_token < 0) break; rq = blk_mq_dequeue_from_ctx(hctx, ctx); if (!rq) { blk_mq_put_dispatch_budget(q, budget_token); /* * We're releasing without dispatching. Holding the * budget could have blocked any "hctx"s with the * same queue and if we didn't dispatch then there's * no guarantee anyone will kick the queue. Kick it * ourselves. */ blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY); break; } blk_mq_set_rq_budget_token(rq, budget_token); /* * Now this rq owns the budget which has to be released * if this rq won't be queued to driver via .queue_rq() * in blk_mq_dispatch_rq_list(). */ list_add(&rq->queuelist, &rq_list); /* round robin for fair dispatch */ ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, false)); WRITE_ONCE(hctx->dispatch_from, ctx); return ret; } static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { bool need_dispatch = false; LIST_HEAD(rq_list); /* * If we have previous entries on our dispatch list, grab them first for * more fair dispatch. */ if (!list_empty_careful(&hctx->dispatch)) { spin_lock(&hctx->lock); if (!list_empty(&hctx->dispatch)) list_splice_init(&hctx->dispatch, &rq_list); spin_unlock(&hctx->lock); } /* * Only ask the scheduler for requests, if we didn't have residual * requests from the dispatch list. This is to avoid the case where * we only ever dispatch a fraction of the requests available because * of low device queue depth. Once we pull requests out of the IO * scheduler, we can no longer merge or sort them. So it's best to * leave them there for as long as we can. Mark the hw queue as * needing a restart in that case. * * We want to dispatch from the scheduler if there was nothing * on the dispatch list or we were able to dispatch from the * dispatch list. 
*/ if (!list_empty(&rq_list)) { blk_mq_sched_mark_restart_hctx(hctx); if (!blk_mq_dispatch_rq_list(hctx, &rq_list, true)) return 0; need_dispatch = true; } else { need_dispatch = hctx->dispatch_busy; } if (hctx->queue->elevator) return blk_mq_do_dispatch_sched(hctx); /* dequeue request one by one from sw queue if queue is busy */ if (need_dispatch) return blk_mq_do_dispatch_ctx(hctx); blk_mq_flush_busy_ctxs(hctx, &rq_list); blk_mq_dispatch_rq_list(hctx, &rq_list, true); return 0; } void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx) { struct request_queue *q = hctx->queue; /* RCU or SRCU read lock is needed before checking quiesced flag */ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) return; /* * A return of -EAGAIN is an indication that hctx->dispatch is not * empty and we must run again in order to avoid starving flushes. */ if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) { if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) blk_mq_run_hw_queue(hctx, true); } } bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { struct elevator_queue *e = q->elevator; struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; bool ret = false; enum hctx_type type; if (e && e->type->ops.bio_merge) { ret = e->type->ops.bio_merge(q, bio, nr_segs); goto out_put; } ctx = blk_mq_get_ctx(q); hctx = blk_mq_map_queue(bio->bi_opf, ctx); type = hctx->type; if (list_empty_careful(&ctx->rq_lists[type])) goto out_put; /* default per sw-queue merge */ spin_lock(&ctx->lock); /* * Reverse check our software queue for entries that we could * potentially merge with. Currently includes a hand-wavy stop * count of 8, to not spend too much time checking for merges. */ if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) ret = true; spin_unlock(&ctx->lock); out_put: return ret; } bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free) { return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free); } EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge); /* called in queue's release handler, tagset has gone away */ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) hctx->sched_tags = NULL; if (blk_mq_is_shared_tags(flags)) q->sched_shared_tags = NULL; } void blk_mq_sched_reg_debugfs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; mutex_lock(&q->debugfs_mutex); blk_mq_debugfs_register_sched(q); queue_for_each_hw_ctx(q, hctx, i) blk_mq_debugfs_register_sched_hctx(q, hctx); mutex_unlock(&q->debugfs_mutex); } void blk_mq_sched_unreg_debugfs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; mutex_lock(&q->debugfs_mutex); queue_for_each_hw_ctx(q, hctx, i) blk_mq_debugfs_unregister_sched_hctx(hctx); blk_mq_debugfs_unregister_sched(q); mutex_unlock(&q->debugfs_mutex); } void blk_mq_free_sched_tags(struct elevator_tags *et, struct blk_mq_tag_set *set) { unsigned long i; /* Shared tags are stored at index 0 in @tags. 
*/ if (blk_mq_is_shared_tags(set->flags)) blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX); else { for (i = 0; i < et->nr_hw_queues; i++) blk_mq_free_map_and_rqs(set, et->tags[i], i); } kfree(et); } void blk_mq_free_sched_tags_batch(struct xarray *et_table, struct blk_mq_tag_set *set) { struct request_queue *q; struct elevator_tags *et; lockdep_assert_held_write(&set->update_nr_hwq_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { /* * Accessing q->elevator without holding q->elevator_lock is * safe because we're holding here set->update_nr_hwq_lock in * the writer context. So, scheduler update/switch code (which * acquires the same lock but in the reader context) can't run * concurrently. */ if (q->elevator) { et = xa_load(et_table, q->id); if (unlikely(!et)) WARN_ON_ONCE(1); else blk_mq_free_sched_tags(et, set); } } } struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set, unsigned int nr_hw_queues) { unsigned int nr_tags; int i; struct elevator_tags *et; gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; if (blk_mq_is_shared_tags(set->flags)) nr_tags = 1; else nr_tags = nr_hw_queues; et = kmalloc(sizeof(struct elevator_tags) + nr_tags * sizeof(struct blk_mq_tags *), gfp); if (!et) return NULL; /* * Default to double of smaller one between hw queue_depth and * 128, since we don't split into sync/async like the old code * did. Additionally, this is a per-hw queue depth. */ et->nr_requests = 2 * min_t(unsigned int, set->queue_depth, BLKDEV_DEFAULT_RQ); et->nr_hw_queues = nr_hw_queues; if (blk_mq_is_shared_tags(set->flags)) { /* Shared tags are stored at index 0 in @tags. */ et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, MAX_SCHED_RQ); if (!et->tags[0]) goto out; } else { for (i = 0; i < et->nr_hw_queues; i++) { et->tags[i] = blk_mq_alloc_map_and_rqs(set, i, et->nr_requests); if (!et->tags[i]) goto out_unwind; } } return et; out_unwind: while (--i >= 0) blk_mq_free_map_and_rqs(set, et->tags[i], i); out: kfree(et); return NULL; } int blk_mq_alloc_sched_tags_batch(struct xarray *et_table, struct blk_mq_tag_set *set, unsigned int nr_hw_queues) { struct request_queue *q; struct elevator_tags *et; gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY; lockdep_assert_held_write(&set->update_nr_hwq_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { /* * Accessing q->elevator without holding q->elevator_lock is * safe because we're holding here set->update_nr_hwq_lock in * the writer context. So, scheduler update/switch code (which * acquires the same lock but in the reader context) can't run * concurrently. */ if (q->elevator) { et = blk_mq_alloc_sched_tags(set, nr_hw_queues); if (!et) goto out_unwind; if (xa_insert(et_table, q->id, et, gfp)) goto out_free_tags; } } return 0; out_free_tags: blk_mq_free_sched_tags(et, set); out_unwind: list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) { if (q->elevator) { et = xa_load(et_table, q->id); if (et) blk_mq_free_sched_tags(et, set); } } return -ENOMEM; } /* caller must have a reference to @e, will grab another one if successful */ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e, struct elevator_tags *et) { unsigned int flags = q->tag_set->flags; struct blk_mq_hw_ctx *hctx; struct elevator_queue *eq; unsigned long i; int ret; eq = elevator_alloc(q, e, et); if (!eq) return -ENOMEM; q->nr_requests = et->nr_requests; if (blk_mq_is_shared_tags(flags)) { /* Shared tags are stored at index 0 in @et->tags. 
*/ q->sched_shared_tags = et->tags[0]; blk_mq_tag_update_sched_shared_tags(q); } queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_is_shared_tags(flags)) hctx->sched_tags = q->sched_shared_tags; else hctx->sched_tags = et->tags[i]; } ret = e->ops.init_sched(q, eq); if (ret) goto out; queue_for_each_hw_ctx(q, hctx, i) { if (e->ops.init_hctx) { ret = e->ops.init_hctx(hctx, i); if (ret) { blk_mq_exit_sched(q, eq); kobject_put(&eq->kobj); return ret; } } } return 0; out: blk_mq_sched_tags_teardown(q, flags); kobject_put(&eq->kobj); q->elevator = NULL; return ret; } /* * called in either blk_queue_cleanup or elevator_switch, tagset * is required for freeing requests */ void blk_mq_sched_free_rqs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; if (blk_mq_is_shared_tags(q->tag_set->flags)) { blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, BLK_MQ_NO_HCTX_IDX); } else { queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); } } } void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) { struct blk_mq_hw_ctx *hctx; unsigned long i; unsigned int flags = 0; queue_for_each_hw_ctx(q, hctx, i) { if (e->type->ops.exit_hctx && hctx->sched_data) { e->type->ops.exit_hctx(hctx, i); hctx->sched_data = NULL; } flags = hctx->flags; } if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); blk_mq_sched_tags_teardown(q, flags); set_bit(ELEVATOR_FLAG_DYING, &q->elevator->flags); q->elevator = NULL; } |
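The framework above only ever drives a scheduler through its elevator_type callbacks: has_work()/dispatch_request() in __blk_mq_do_dispatch_sched(), bio_merge() in blk_mq_sched_bio_merge(), and init_sched()/init_hctx()/exit_hctx()/exit_sched() around setup and teardown. As a hedged illustration of that contract only (a real scheduler needs further callbacks, e.g. for inserting and completing requests, which this file never touches, and the stub below is never registered with the elevator core), a do-nothing scheduler wired to those hooks might look roughly like:

static bool noop_has_work(struct blk_mq_hw_ctx *hctx)
{
	return false;		/* this toy scheduler never queues anything */
}

static struct request *noop_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	return NULL;		/* nothing to hand back to blk-mq */
}

static int noop_init_sched(struct request_queue *q, struct elevator_queue *eq)
{
	/* assumption: like real schedulers, publish the elevator queue here */
	q->elevator = eq;
	return 0;
}

static void noop_exit_sched(struct elevator_queue *e)
{
}

static struct elevator_type noop_example_sched = {
	.ops = {
		.has_work		= noop_has_work,
		.dispatch_request	= noop_dispatch_request,
		.init_sched		= noop_init_sched,
		.exit_sched		= noop_exit_sched,
	},
	.elevator_name = "noop-example",
};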
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SHA-1 optimized for x86_64
 *
 * Copyright 2025 Google LLC
 */
#include <asm/fpu/api.h>
#include <linux/static_call.h>

DEFINE_STATIC_CALL(sha1_blocks_x86, sha1_blocks_generic);

#define DEFINE_X86_SHA1_FN(c_fn, asm_fn)				\
	asmlinkage void asm_fn(struct sha1_block_state *state,		\
			       const u8 *data, size_t nblocks);		\
	static void c_fn(struct sha1_block_state *state,		\
			 const u8 *data, size_t nblocks)		\
	{								\
		if (likely(irq_fpu_usable())) {				\
			kernel_fpu_begin();				\
			asm_fn(state, data, nblocks);			\
			kernel_fpu_end();				\
		} else {						\
			sha1_blocks_generic(state, data, nblocks);	\
		}							\
	}

DEFINE_X86_SHA1_FN(sha1_blocks_ssse3, sha1_transform_ssse3);
DEFINE_X86_SHA1_FN(sha1_blocks_avx, sha1_transform_avx);
DEFINE_X86_SHA1_FN(sha1_blocks_ni, sha1_ni_transform);

#define SHA1_AVX2_BLOCK_OPTSIZE	4 /* optimal 4*64 bytes of SHA1 blocks */

asmlinkage void sha1_transform_avx2(struct sha1_block_state *state,
				    const u8 *data, size_t nblocks);

static void sha1_blocks_avx2(struct sha1_block_state *state,
			     const u8 *data, size_t nblocks)
{
	if (likely(irq_fpu_usable())) {
		kernel_fpu_begin();
		/* Select the optimal transform based on the number of blocks */
		if (nblocks >= SHA1_AVX2_BLOCK_OPTSIZE)
			sha1_transform_avx2(state, data, nblocks);
		else
			sha1_transform_avx(state, data, nblocks);
		kernel_fpu_end();
	} else {
		sha1_blocks_generic(state, data, nblocks);
	}
}

static void sha1_blocks(struct sha1_block_state *state,
			const u8 *data, size_t nblocks)
{
	static_call(sha1_blocks_x86)(state, data, nblocks);
}

#define sha1_mod_init_arch sha1_mod_init_arch
static inline void sha1_mod_init_arch(void)
{
	if (boot_cpu_has(X86_FEATURE_SHA_NI)) {
		static_call_update(sha1_blocks_x86, sha1_blocks_ni);
	} else if (cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
				     NULL) &&
		   boot_cpu_has(X86_FEATURE_AVX)) {
		if (boot_cpu_has(X86_FEATURE_AVX2) &&
		    boot_cpu_has(X86_FEATURE_BMI1) &&
		    boot_cpu_has(X86_FEATURE_BMI2))
			static_call_update(sha1_blocks_x86, sha1_blocks_avx2);
		else
			static_call_update(sha1_blocks_x86, sha1_blocks_avx);
	} else if (boot_cpu_has(X86_FEATURE_SSSE3)) {
		static_call_update(sha1_blocks_x86, sha1_blocks_ssse3);
	}
}
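The glue above is a compact example of the static-call pattern: sha1_blocks() always calls through static_call(sha1_blocks_x86), which is bound to sha1_blocks_generic at build time and retargeted exactly once in sha1_mod_init_arch() after CPU features are known. A generic sketch of the same pattern, with every name below invented for illustration:

#include <asm/cpufeature.h>
#include <linux/init.h>
#include <linux/static_call.h>

static void impl_generic(int x)
{
	/* portable fallback, always safe to call */
}

static void impl_avx2(int x)
{
	/* feature-gated fast path */
}

/* default target is the generic implementation */
DEFINE_STATIC_CALL(my_impl, impl_generic);

static void do_work(int x)
{
	/* call sites are patched direct calls, no indirect branch */
	static_call(my_impl)(x);
}

static void __init select_impl(void)
{
	if (boot_cpu_has(X86_FEATURE_AVX2))
		static_call_update(my_impl, impl_avx2);
}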
1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH * Copyright (C) 2018-2025 Intel Corporation */ #include <net/mac80211.h> #include <linux/module.h> #include <linux/fips.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/bitmap.h> #include <linux/inetdevice.h> #include <net/net_namespace.h> #include <net/dropreason.h> #include <net/cfg80211.h> #include <net/addrconf.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wep.h" #include "led.h" #include "debugfs.h" void ieee80211_configure_filter(struct ieee80211_local *local) { u64 mc; unsigned int changed_flags; unsigned int new_flags = 0; if (atomic_read(&local->iff_allmultis)) new_flags |= FIF_ALLMULTI; if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) || test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) new_flags |= FIF_BCN_PRBRESP_PROMISC; if (local->fif_probe_req || local->probe_req_reg) new_flags |= FIF_PROBE_REQ; if (local->fif_fcsfail) new_flags |= FIF_FCSFAIL; if (local->fif_plcpfail) new_flags |= FIF_PLCPFAIL; if (local->fif_control) new_flags |= FIF_CONTROL; if (local->fif_other_bss) new_flags |= FIF_OTHER_BSS; if (local->fif_pspoll) new_flags |= FIF_PSPOLL; if (local->rx_mcast_action_reg) new_flags |= FIF_MCAST_ACTION; spin_lock_bh(&local->filter_lock); changed_flags = local->filter_flags ^ new_flags; mc = drv_prepare_multicast(local, &local->mc_list); spin_unlock_bh(&local->filter_lock); /* be a bit nasty */ new_flags |= (1<<31); drv_configure_filter(local, changed_flags, &new_flags, mc); WARN_ON(new_flags & (1<<31)); local->filter_flags = new_flags & ~(1<<31); } static void ieee80211_reconfig_filter(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, reconfig_filter); ieee80211_configure_filter(local); } static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef = {}; struct cfg80211_chan_def *oper = NULL; enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_STATIC; u32 changed = 0; int power; u32 offchannel_flag; if (!local->emulate_chanctx) return 0; offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; if (ctx && !WARN_ON(!ctx->def.chan)) { oper = &ctx->def; if (ctx->rx_chains_static > 1) 
smps_mode = IEEE80211_SMPS_OFF; else if (ctx->rx_chains_dynamic > 1) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_STATIC; } if (local->scan_chandef.chan) { chandef = local->scan_chandef; } else if (local->tmp_channel) { chandef.chan = local->tmp_channel; chandef.width = NL80211_CHAN_WIDTH_20_NOHT; chandef.center_freq1 = chandef.chan->center_freq; chandef.freq1_offset = chandef.chan->freq_offset; } else if (oper) { chandef = *oper; } else { chandef = local->dflt_chandef; } if (WARN(!cfg80211_chandef_valid(&chandef), "control:%d.%03d MHz width:%d center: %d.%03d/%d MHz", chandef.chan ? chandef.chan->center_freq : -1, chandef.chan ? chandef.chan->freq_offset : 0, chandef.width, chandef.center_freq1, chandef.freq1_offset, chandef.center_freq2)) return 0; if (!oper || !cfg80211_chandef_identical(&chandef, oper)) local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL; else local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL; offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; } if (!conf_is_ht(&local->hw.conf)) { /* * mac80211.h documents that this is only valid * when the channel is set to an HT type, and * that otherwise STATIC is used. */ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC; } else if (local->hw.conf.smps_mode != smps_mode) { local->hw.conf.smps_mode = smps_mode; changed |= IEEE80211_CONF_CHANGE_SMPS; } power = ieee80211_chandef_max_power(&chandef); if (local->user_power_level != IEEE80211_UNSET_POWER_LEVEL) power = min(local->user_power_level, power); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (!rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf)) continue; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) continue; if (sdata->vif.bss_conf.txpower == INT_MIN) continue; power = min(power, sdata->vif.bss_conf.txpower); } rcu_read_unlock(); if (local->hw.conf.power_level != power) { changed |= IEEE80211_CONF_CHANGE_POWER; local->hw.conf.power_level = power; } return changed; } int ieee80211_hw_config(struct ieee80211_local *local, int radio_idx, u32 changed) { int ret = 0; might_sleep(); WARN_ON(changed & (IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS)); if (changed && local->open_count) { ret = drv_config(local, radio_idx, changed); /* * Goal: * HW reconfiguration should never fail, the driver has told * us what it can support so it should live up to that promise. * * Current status: * rfkill is not integrated with mac80211 and a * configuration command can thus fail if hardware rfkill * is enabled * * FIXME: integrate rfkill with mac80211 and then add this * WARN_ON() back * */ /* WARN_ON(ret); */ } return ret; } /* for scanning, offchannel and chanctx emulation only */ static int _ieee80211_hw_conf_chan(struct ieee80211_local *local, struct ieee80211_chanctx_conf *ctx) { u32 changed; if (!local->open_count) return 0; changed = ieee80211_calc_hw_conf_chan(local, ctx); if (!changed) return 0; return drv_config(local, -1, changed); } int ieee80211_hw_conf_chan(struct ieee80211_local *local) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); return _ieee80211_hw_conf_chan(local, ctx ? 
&ctx->conf : NULL); } void ieee80211_hw_conf_init(struct ieee80211_local *local) { u32 changed = ~(IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER | IEEE80211_CONF_CHANGE_SMPS); if (WARN_ON(!local->open_count)) return; if (local->emulate_chanctx) { struct ieee80211_chanctx *ctx; ctx = list_first_entry_or_null(&local->chanctx_list, struct ieee80211_chanctx, list); changed |= ieee80211_calc_hw_conf_chan(local, ctx ? &ctx->conf : NULL); } WARN_ON(drv_config(local, -1, changed)); } int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; return _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_add_chanctx); void ieee80211_emulate_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = false; _ieee80211_hw_conf_chan(local, NULL); } EXPORT_SYMBOL(ieee80211_emulate_remove_chanctx); void ieee80211_emulate_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { struct ieee80211_local *local = hw_to_local(hw); local->hw.conf.radar_enabled = ctx->radar_enabled; _ieee80211_hw_conf_chan(local, ctx); } EXPORT_SYMBOL(ieee80211_emulate_change_chanctx); int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode) { struct ieee80211_local *local = hw_to_local(hw); if (n_vifs <= 0) return -EINVAL; local->hw.conf.radar_enabled = vifs[0].new_ctx->radar_enabled; _ieee80211_hw_conf_chan(local, vifs[0].new_ctx); return 0; } EXPORT_SYMBOL(ieee80211_emulate_switch_vif_chanctx); #define BSS_CHANGED_VIF_CFG_FLAGS (BSS_CHANGED_ASSOC |\ BSS_CHANGED_IDLE |\ BSS_CHANGED_PS |\ BSS_CHANGED_IBSS |\ BSS_CHANGED_ARP_FILTER |\ BSS_CHANGED_SSID |\ BSS_CHANGED_MLD_VALID_LINKS |\ BSS_CHANGED_MLD_TTLM) void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, u64 changed) { struct ieee80211_local *local = sdata->local; might_sleep(); WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; if (WARN_ON_ONCE(changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED) && sdata->vif.type != NL80211_IFTYPE_AP && sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_OCB)) return; if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && !sdata->vif.bss_conf.mu_mimo_owner && !(changed & BSS_CHANGED_TXPOWER)))) return; if (!check_sdata_in_driver(sdata)) return; if (changed & BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_vif_cfg_changed(local, sdata, changed); if (local->ops->vif_cfg_changed) local->ops->vif_cfg_changed(&local->hw, &sdata->vif, ch); } if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, changed); if (local->ops->link_info_changed) local->ops->link_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, ch); } if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, changed); trace_drv_return_void(local); } void ieee80211_vif_cfg_change_notify(struct ieee80211_sub_if_data *sdata, u64 
changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & ~BSS_CHANGED_VIF_CFG_FLAGS); if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; drv_vif_cfg_changed(local, sdata, changed); } void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, u64 changed) { struct ieee80211_local *local = sdata->local; WARN_ON_ONCE(changed & BSS_CHANGED_VIF_CFG_FLAGS); if (!changed) return; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: return; case NL80211_IFTYPE_MONITOR: if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) return; break; default: break; } if (!check_sdata_in_driver(sdata)) return; drv_link_info_changed(local, sdata, link->conf, link->link_id, changed); } u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) { sdata->vif.bss_conf.use_cts_prot = false; sdata->vif.bss_conf.use_short_preamble = false; sdata->vif.bss_conf.use_short_slot = false; return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT; } /* context: requires softirqs disabled */ void ieee80211_handle_queued_frames(struct ieee80211_local *local) { struct sk_buff *skb; while ((skb = skb_dequeue(&local->skb_queue)) || (skb = skb_dequeue(&local->skb_queue_unreliable))) { switch (skb->pkt_type) { case IEEE80211_RX_MSG: /* Clear skb->pkt_type in order to not confuse kernel * netstack. */ skb->pkt_type = 0; ieee80211_rx(&local->hw, skb); break; case IEEE80211_TX_STATUS_MSG: skb->pkt_type = 0; ieee80211_tx_status_skb(&local->hw, skb); break; default: WARN(1, "mac80211: Packet is of unknown type %d\n", skb->pkt_type); dev_kfree_skb(skb); break; } } } static void ieee80211_tasklet_handler(struct tasklet_struct *t) { struct ieee80211_local *local = from_tasklet(local, t, tasklet); ieee80211_handle_queued_frames(local); } static void ieee80211_restart_work(struct work_struct *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, restart_work); struct ieee80211_sub_if_data *sdata; int ret; flush_workqueue(local->workqueue); rtnl_lock(); /* we might do interface manipulations, so need both */ wiphy_lock(local->hw.wiphy); wiphy_work_flush(local->hw.wiphy, NULL); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); list_for_each_entry(sdata, &local->interfaces, list) { /* * XXX: there may be more work for other vif types and even * for station mode: a good thing would be to run most of * the iface type's dependent _stop (ieee80211_mg_stop, * ieee80211_ibss_stop) etc... * For now, fix only the specific bug that was seen: race * between csa_connection_drop_work and us. */ if (sdata->vif.type == NL80211_IFTYPE_STATION) { /* * This worker is scheduled from the iface worker that * runs on mac80211's workqueue, so we can't be * scheduling this worker after the cancel right here. * The exception is ieee80211_chswitch_done. * Then we can have a race... 
*/ wiphy_work_cancel(local->hw.wiphy, &sdata->u.mgd.csa_connection_drop_work); if (sdata->vif.bss_conf.csa_active) ieee80211_sta_connection_lost(sdata, WLAN_REASON_UNSPECIFIED, false); } wiphy_delayed_work_flush(local->hw.wiphy, &sdata->dec_tailroom_needed_wk); } ieee80211_scan_cancel(local); /* make sure any new ROC will consider local->in_reconfig */ wiphy_delayed_work_flush(local->hw.wiphy, &local->roc_work); wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); /* wait for all packet processing to be done */ synchronize_net(); ret = ieee80211_reconfig(local); wiphy_unlock(local->hw.wiphy); if (ret) cfg80211_shutdown_all_interfaces(local->hw.wiphy); rtnl_unlock(); } void ieee80211_restart_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_restart_hw(local); wiphy_info(hw->wiphy, "Hardware restart was requested\n"); /* use this reason, ieee80211_reconfig will unblock it */ ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); /* * Stop all Rx during the reconfig. We don't want state changes * or driver callbacks while this is in progress. */ local->in_reconfig = true; barrier(); queue_work(system_freezable_wq, &local->restart_work); } EXPORT_SYMBOL(ieee80211_restart_hw); #ifdef CONFIG_INET static int ieee80211_ifa_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct in_ifaddr *ifa = arg; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa_notifier); struct net_device *ndev = ifa->ifa_dev->dev; struct wireless_dev *wdev = ndev->ieee80211_ptr; struct in_device *idev; struct ieee80211_sub_if_data *sdata; struct ieee80211_vif_cfg *vif_cfg; struct ieee80211_if_managed *ifmgd; int c = 0; /* Make sure it's our interface that got changed */ if (!wdev) return NOTIFY_DONE; if (wdev->wiphy != local->hw.wiphy || !wdev->registered) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); vif_cfg = &sdata->vif.cfg; /* ARP filtering is only supported in managed mode */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; idev = __in_dev_get_rtnl(sdata->dev); if (!idev) return NOTIFY_DONE; ifmgd = &sdata->u.mgd; /* * The nested here is needed to convince lockdep that this is * all OK. Yes, we lock the wiphy mutex here while we already * hold the notifier rwsem, that's the normal case. And yes, * we also acquire the notifier rwsem again when unregistering * a netdev while we already hold the wiphy mutex, so it does * look like a typical ABBA deadlock. * * However, both of these things happen with the RTNL held * already. Therefore, they can't actually happen, since the * lock orders really are ABC and ACB, which is fine due to * the RTNL (A). * * We still need to prevent recursion, which is accomplished * by the !wdev->registered check above. 
*/ mutex_lock_nested(&local->hw.wiphy->mtx, 1); __acquire(&local->hw.wiphy->mtx); /* Copy the addresses to the vif config list */ ifa = rtnl_dereference(idev->ifa_list); while (ifa) { if (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN) vif_cfg->arp_addr_list[c] = ifa->ifa_address; ifa = rtnl_dereference(ifa->ifa_next); c++; } vif_cfg->arp_addr_cnt = c; /* Configure driver only if associated (which also implies it is up) */ if (ifmgd->associated) ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_ARP_FILTER); wiphy_unlock(local->hw.wiphy); return NOTIFY_OK; } #endif #if IS_ENABLED(CONFIG_IPV6) static int ieee80211_ifa6_changed(struct notifier_block *nb, unsigned long data, void *arg) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)arg; struct inet6_dev *idev = ifa->idev; struct net_device *ndev = ifa->idev->dev; struct ieee80211_local *local = container_of(nb, struct ieee80211_local, ifa6_notifier); struct wireless_dev *wdev = ndev->ieee80211_ptr; struct ieee80211_sub_if_data *sdata; /* Make sure it's our interface that got changed */ if (!wdev || wdev->wiphy != local->hw.wiphy) return NOTIFY_DONE; sdata = IEEE80211_DEV_TO_SUB_IF(ndev); /* * For now only support station mode. This is mostly because * doing AP would have to handle AP_VLAN in some way ... */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return NOTIFY_DONE; drv_ipv6_addr_change(local, sdata, idev); return NOTIFY_OK; } #endif /* There isn't a lot of sense in it, but you can transmit anything you like */ static const struct ieee80211_txrx_stypes ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_ADHOC] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_STATION] = { .tx = 0xffff, /* * To support Pre Association Security Negotiation (PASN) while * already associated to one AP, allow user space to register to * Rx authentication frames, so that the user space logic would * be able to receive/handle authentication frames from a * different AP as part of PASN. * It is expected that user space would intelligently register * for Rx authentication frames, i.e., only when PASN is used * and configure a match filter only for PASN authentication * algorithm, as otherwise the MLME functionality of mac80211 * would be broken. 
*/ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_AP] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_AP_VLAN] = { /* copy AP */ .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4), }, [NL80211_IFTYPE_P2P_GO] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4), }, [NL80211_IFTYPE_MESH_POINT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4), }, [NL80211_IFTYPE_P2P_DEVICE] = { .tx = 0xffff, /* * To support P2P PASN pairing let user space register to rx * also AUTH frames on P2P device interface. */ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4), }, }; static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | IEEE80211_HT_AMPDU_PARM_DENSITY, .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_RX_STBC | IEEE80211_HT_CAP_LDPC_CODING | IEEE80211_HT_CAP_40MHZ_INTOLERANT), .mcs = { .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, }, }; static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = { .vht_cap_info = cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_RXSTBC_MASK | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK), .supp_mcs = { .rx_mcs_map = cpu_to_le16(~0), .tx_mcs_map = cpu_to_le16(~0), }, }; struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, const struct ieee80211_ops *ops, const char *requested_name) { struct ieee80211_local *local; int priv_size, i; struct wiphy *wiphy; bool emulate_chanctx; if (WARN_ON(!ops->tx || !ops->start || !ops->stop || !ops->config || !ops->add_interface || !ops->remove_interface || !ops->configure_filter || !ops->wake_tx_queue)) return NULL; if (WARN_ON(ops->sta_state && (ops->sta_add || ops->sta_remove))) return NULL; if (WARN_ON(!!ops->link_info_changed != !!ops->vif_cfg_changed || (ops->link_info_changed && ops->bss_info_changed))) return NULL; /* check all or no channel context operations exist */ if (ops->add_chanctx == ieee80211_emulate_add_chanctx && ops->remove_chanctx == ieee80211_emulate_remove_chanctx && ops->change_chanctx == ieee80211_emulate_change_chanctx) { if (WARN_ON(ops->assign_vif_chanctx || 
ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = true; } else { if (WARN_ON(ops->add_chanctx == ieee80211_emulate_add_chanctx || ops->remove_chanctx == ieee80211_emulate_remove_chanctx || ops->change_chanctx == ieee80211_emulate_change_chanctx)) return NULL; if (WARN_ON(!ops->add_chanctx || !ops->remove_chanctx || !ops->change_chanctx || !ops->assign_vif_chanctx || !ops->unassign_vif_chanctx)) return NULL; emulate_chanctx = false; } /* Ensure 32-byte alignment of our private data and hw private data. * We use the wiphy priv data for both our ieee80211_local and for * the driver's private data * * In memory it'll be like this: * * +-------------------------+ * | struct wiphy | * +-------------------------+ * | struct ieee80211_local | * +-------------------------+ * | driver's private data | * +-------------------------+ * */ priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len; wiphy = wiphy_new_nm(&mac80211_config_ops, priv_size, requested_name); if (!wiphy) return NULL; wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes; wiphy->privid = mac80211_wiphy_privid; wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | WIPHY_FLAG_4ADDR_STATION | WIPHY_FLAG_REPORTS_OBSS | WIPHY_FLAG_OFFCHAN_TX; if (emulate_chanctx || ops->remain_on_channel) wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | NL80211_FEATURE_SAE | NL80211_FEATURE_HT_IBSS | NL80211_FEATURE_VIF_TXPOWER | NL80211_FEATURE_MAC_ON_CREATE | NL80211_FEATURE_USERSPACE_MPM | NL80211_FEATURE_FULL_AP_CLIENT_STATE; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_STA); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE); if (!ops->hw_scan) { wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_AP_SCAN; /* * if the driver behaves correctly using the probe request * (template) from mac80211, then both of these should be * supported even with hw scan - but let drivers opt in. */ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_RANDOM_SN); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); } if (!ops->set_key) { wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT); } wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_TXQS); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_RRM); wiphy->bss_priv_size = sizeof(struct ieee80211_bss); local = wiphy_priv(wiphy); if (sta_info_init(local)) goto err_free; local->hw.wiphy = wiphy; local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); local->ops = ops; local->emulate_chanctx = emulate_chanctx; if (emulate_chanctx) ieee80211_hw_set(&local->hw, CHANCTX_STA_CSA); /* * We need a bit of data queued to build aggregates properly, so * instruct the TCP stack to allow more than a single ms of data * to be queued in the stack. The value is a bit-shift of 1 * second, so 7 is ~8ms of queued data. Only affects local TCP * sockets. * This is the default, anyhow - drivers may need to override it * for local reasons (longer buffers, longer completion time, or * similar). 
*/ local->hw.tx_sk_pacing_shift = 7; /* set up some defaults */ local->hw.queues = 1; local->hw.max_rates = 1; local->hw.max_report_rates = 0; local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HT; local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE; local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; local->hw.radiotap_mcs_details = IEEE80211_RADIOTAP_MCS_HAVE_MCS | IEEE80211_RADIOTAP_MCS_HAVE_GI | IEEE80211_RADIOTAP_MCS_HAVE_BW; local->hw.radiotap_vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI | IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; local->hw.uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->hw.uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; local->hw.max_mtu = IEEE80211_MAX_DATA_LEN; local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask; local->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF; wiphy->extended_capabilities = local->ext_capa; wiphy->extended_capabilities_mask = local->ext_capa; wiphy->extended_capabilities_len = ARRAY_SIZE(local->ext_capa); INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->mon_list); __hw_addr_init(&local->mc_list); mutex_init(&local->iflist_mtx); spin_lock_init(&local->filter_lock); spin_lock_init(&local->rx_path_lock); spin_lock_init(&local->queue_stop_reason_lock); for (i = 0; i < IEEE80211_NUM_ACS; i++) { INIT_LIST_HEAD(&local->active_txqs[i]); spin_lock_init(&local->active_txq_lock[i]); local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; local->aql_txq_limit_high[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; atomic_set(&local->aql_ac_pending_airtime[i], 0); } local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; local->aql_threshold = IEEE80211_AQL_THRESHOLD; atomic_set(&local->aql_total_pending_airtime, 0); spin_lock_init(&local->handle_wake_tx_queue_lock); INIT_LIST_HEAD(&local->chanctx_list); wiphy_delayed_work_init(&local->scan_work, ieee80211_scan_work); INIT_WORK(&local->restart_work, ieee80211_restart_work); wiphy_work_init(&local->radar_detected_work, ieee80211_dfs_radar_detected_work); wiphy_work_init(&local->reconfig_filter, ieee80211_reconfig_filter); wiphy_work_init(&local->dynamic_ps_enable_work, ieee80211_dynamic_ps_enable_work); wiphy_work_init(&local->dynamic_ps_disable_work, ieee80211_dynamic_ps_disable_work); timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0); wiphy_work_init(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); spin_lock_init(&local->ack_status_lock); idr_init(&local->ack_status_frames); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_head_init(&local->pending[i]); atomic_set(&local->agg_queue_stop[i], 0); } tasklet_setup(&local->tx_pending_tasklet, ieee80211_tx_pending); tasklet_setup(&local->wake_txqs_tasklet, ieee80211_wake_txqs); tasklet_setup(&local->tasklet, ieee80211_tasklet_handler); skb_queue_head_init(&local->skb_queue); skb_queue_head_init(&local->skb_queue_unreliable); ieee80211_alloc_led_names(local); ieee80211_roc_setup(local); local->hw.radiotap_timestamp.units_pos = -1; local->hw.radiotap_timestamp.accuracy = -1; return &local->hw; err_free: wiphy_free(wiphy); return NULL; } EXPORT_SYMBOL(ieee80211_alloc_hw_nm); static int ieee80211_init_cipher_suites(struct ieee80211_local *local) { bool have_mfp = ieee80211_hw_check(&local->hw, MFP_CAPABLE); static const u32 cipher_suites[] = 
{ /* keep WEP and TKIP first, they may be removed below */ WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, /* keep last -- depends on hw flags! */ WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; if (ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) && fips_enabled) { dev_err(local->hw.wiphy->dev.parent, "Drivers with SW_CRYPTO_CONTROL cannot work with FIPS\n"); return -EINVAL; } if (WARN_ON(ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL) && !local->hw.wiphy->cipher_suites)) return -EINVAL; if (fips_enabled || !local->hw.wiphy->cipher_suites) { /* assign the (software supported and perhaps offloaded) * cipher suites */ local->hw.wiphy->cipher_suites = cipher_suites; local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); if (!have_mfp) local->hw.wiphy->n_cipher_suites -= 4; /* FIPS does not permit the use of RC4 */ if (fips_enabled) { local->hw.wiphy->cipher_suites += 3; local->hw.wiphy->n_cipher_suites -= 3; } } return 0; } static bool ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb) { int i, j; for (i = 0; i < n_comb; i++, c++) { /* DFS is not supported with multi-channel combinations yet */ if (c->radar_detect_widths && c->num_different_channels > 1) return false; /* mac80211 doesn't support more than one IBSS interface */ for (j = 0; j < c->n_limits; j++) if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && c->limits[j].max > 1) return false; } return true; } int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); int result, i; enum nl80211_band band; int channels, max_bitrates; bool supp_ht, supp_vht, supp_he, supp_eht; struct cfg80211_chan_def dflt_chandef = {}; if (ieee80211_hw_check(hw, QUEUE_CONTROL) && (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE || local->hw.offchannel_tx_hw_queue >= local->hw.queues)) return -EINVAL; if ((hw->wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH) && (!local->ops->tdls_channel_switch || !local->ops->tdls_cancel_channel_switch || !local->ops->tdls_recv_channel_switch)) return -EOPNOTSUPP; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_TX_FRAG) && !local->ops->set_frag_threshold)) return -EINVAL; if (WARN_ON(local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN) && (!local->ops->start_nan || !local->ops->stop_nan))) return -EINVAL; if (hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO) { /* * For drivers capable of doing MLO, assume modern driver * or firmware facilities, so software doesn't have to do * as much, e.g. monitoring beacons would be hard if we * might not even know which link is active at which time. 
*/ if (WARN_ON(local->emulate_chanctx)) return -EINVAL; if (WARN_ON(!local->ops->link_info_changed)) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, HAS_RATE_CONTROL))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AMPDU_AGGREGATION))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, HOST_BROADCAST_PS_BUFFERING))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, SUPPORTS_PS) && (!ieee80211_hw_check(hw, SUPPORTS_DYNAMIC_PS) || ieee80211_hw_check(hw, PS_NULLFUNC_STACK)))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, MFP_CAPABLE))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, CONNECTION_MONITOR))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, NEED_DTIM_BEFORE_ASSOC))) return -EINVAL; if (WARN_ON(ieee80211_hw_check(hw, TIMING_BEACON_ONLY))) return -EINVAL; if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) return -EINVAL; } #ifdef CONFIG_PM if (hw->wiphy->wowlan && (!local->ops->suspend || !local->ops->resume)) return -EINVAL; #endif if (local->emulate_chanctx) { for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *comb; comb = &local->hw.wiphy->iface_combinations[i]; if (comb->num_different_channels > 1) return -EINVAL; } } if (hw->wiphy->n_radio) { for (i = 0; i < hw->wiphy->n_radio; i++) { const struct wiphy_radio *radio = &hw->wiphy->radio[i]; if (!ieee80211_ifcomb_check(radio->iface_combinations, radio->n_iface_combinations)) return -EINVAL; } } else { if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations, hw->wiphy->n_iface_combinations)) return -EINVAL; } /* Only HW csum features are currently compatible with mac80211 */ if (WARN_ON(hw->netdev_features & ~MAC80211_SUPPORTED_FEATURES)) return -EINVAL; if (hw->max_report_rates == 0) hw->max_report_rates = hw->max_rates; local->rx_chains = 1; /* * generic code guarantees at least one band, * set this very early because much code assumes * that hw.conf.channel is assigned */ channels = 0; max_bitrates = 0; supp_ht = false; supp_vht = false; supp_he = false; supp_eht = false; for (band = 0; band < NUM_NL80211_BANDS; band++) { const struct ieee80211_sband_iftype_data *iftd; struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[band]; if (!sband) continue; if (!dflt_chandef.chan) { /* * Assign the first enabled channel to dflt_chandef * from the list of channels */ for (i = 0; i < sband->n_channels; i++) if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED)) break; /* if none found then use the first anyway */ if (i == sband->n_channels) i = 0; cfg80211_chandef_create(&dflt_chandef, &sband->channels[i], NL80211_CHAN_NO_HT); /* init channel we're on */ local->monitor_chanreq.oper = dflt_chandef; if (local->emulate_chanctx) { local->dflt_chandef = dflt_chandef; local->hw.conf.chandef = dflt_chandef; } } channels += sband->n_channels; /* * Due to the way the aggregation code handles this and it * being an HT capability, we can't really support delayed * BA in MLO (yet). 
*/ if (WARN_ON(sband->ht_cap.ht_supported && (sband->ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA) && hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; if (max_bitrates < sband->n_bitrates) max_bitrates = sband->n_bitrates; supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; for_each_sband_iftype_data(sband, i, iftd) { u8 he_40_mhz_cap; supp_he = supp_he || iftd->he_cap.has_he; supp_eht = supp_eht || iftd->eht_cap.has_eht; if (band == NL80211_BAND_2GHZ) he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; else he_40_mhz_cap = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G; /* currently no support for HE client where HT has 40 MHz but not HT */ if (iftd->he_cap.has_he && iftd->types_mask & (BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT)) && sband->ht_cap.ht_supported && sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap)) return -EINVAL; /* no support for per-band vendor elems with MLO */ if (WARN_ON(iftd->vendor_elems.len && hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) return -EINVAL; } /* HT, VHT, HE require QoS, thus >= 4 queues */ if (WARN_ON(local->hw.queues < IEEE80211_NUM_ACS && (supp_ht || supp_vht || supp_he))) return -EINVAL; /* EHT requires HE support */ if (WARN_ON(supp_eht && !supp_he)) return -EINVAL; if (!sband->ht_cap.ht_supported) continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ local->rx_chains = max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), local->rx_chains); /* no need to mask, SM_PS_DISABLED has all bits set */ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN. * drivers advertising SW_CRYPTO_CONTROL should enable AP_VLAN * based on their support to transmit SW encrypted packets. 
*/ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP) && !ieee80211_hw_check(&local->hw, SW_CRYPTO_CONTROL)) { hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); } /* mac80211 always supports monitor */ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); local->int_scan_req = kzalloc(struct_size(local->int_scan_req, channels, channels), GFP_KERNEL); if (!local->int_scan_req) return -ENOMEM; eth_broadcast_addr(local->int_scan_req->bssid); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!local->hw.wiphy->bands[band]) continue; local->int_scan_req->rates[band] = (u32) -1; } #ifndef CONFIG_MAC80211_MESH /* mesh depends on Kconfig, but drivers should set it if they want */ local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); #endif /* if the underlying driver supports mesh, mac80211 will (at least) * provide routing of mesh authentication frames to userspace */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT)) local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH; /* mac80211 supports control port protocol changing */ local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; } else if (ieee80211_hw_check(&local->hw, SIGNAL_UNSPEC)) { local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; if (hw->max_signal <= 0) { result = -EINVAL; goto fail_workqueue; } } /* Mac80211 and therefore all drivers using SW crypto only * are able to handle PTK rekeys and Extended Key ID. */ if (!local->ops->set_key) { wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0); wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); } if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_ADHOC)) wiphy_ext_feature_set(local->hw.wiphy, NL80211_EXT_FEATURE_DEL_IBSS_STA); /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); if (supp_vht) local->scan_ies_len += 2 + sizeof(struct ieee80211_vht_cap); /* * HE cap element is variable in size - set len to allow max size */ if (supp_he) { local->scan_ies_len += 3 + sizeof(struct ieee80211_he_cap_elem) + sizeof(struct ieee80211_he_mcs_nss_supp) + IEEE80211_HE_PPE_THRES_MAX_LEN; if (supp_eht) local->scan_ies_len += 3 + sizeof(struct ieee80211_eht_cap_elem) + sizeof(struct ieee80211_eht_mcs_nss_supp) + IEEE80211_EHT_PPE_THRES_MAX_LEN; } if (!local->ops->hw_scan) { /* For hw_scan, driver needs to set these up. */ local->hw.wiphy->max_scan_ssids = 4; local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; } /* * If the driver supports any scan IEs, then assume the * limit includes the IEs mac80211 will add, otherwise * leave it at zero and let the driver sort it out; we * still pass our IEs to the driver but userspace will * not be allowed to in that case. 
*/ if (local->hw.wiphy->max_scan_ie_len) local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len; result = ieee80211_init_cipher_suites(local); if (result < 0) goto fail_workqueue; if (!local->ops->remain_on_channel) local->hw.wiphy->max_remain_on_channel_duration = 5000; /* mac80211 based drivers don't support internal TDLS setup */ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP; /* mac80211 supports eCSA, if the driver supports STA CSA at all */ if (ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) local->ext_capa[0] |= WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING; /* mac80211 supports multi BSSID, if the driver supports it */ if (ieee80211_hw_check(&local->hw, SUPPORTS_MULTI_BSSID)) { local->hw.wiphy->support_mbssid = true; if (ieee80211_hw_check(&local->hw, SUPPORTS_ONLY_HE_MULTI_BSSID)) local->hw.wiphy->support_only_he_mbssid = true; else local->ext_capa[2] |= WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT; } local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM; /* * We use the number of queues for feature tests (QoS, HT) internally * so restrict them appropriately. */ if (hw->queues > IEEE80211_MAX_QUEUES) hw->queues = IEEE80211_MAX_QUEUES; local->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(local->hw.wiphy)); if (!local->workqueue) { result = -ENOMEM; goto fail_workqueue; } /* * The hardware needs headroom for sending the frame, * and we need some headroom for passing the frame to monitor * interfaces, but never both at the same time. */ local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, IEEE80211_TX_STATUS_HEADROOM); /* * if the driver doesn't specify a max listen interval we * use 5 which should be a safe default */ if (local->hw.max_listen_interval == 0) local->hw.max_listen_interval = 5; local->hw.conf.listen_interval = local->hw.max_listen_interval; local->dynamic_ps_forced_timeout = -1; if (!local->hw.max_nan_de_entries) local->hw.max_nan_de_entries = IEEE80211_MAX_NAN_INSTANCE_ID; if (!local->hw.weight_multiplier) local->hw.weight_multiplier = 1; ieee80211_wep_init(local); local->hw.conf.flags = IEEE80211_CONF_IDLE; ieee80211_led_init(local); result = ieee80211_txq_setup_flows(local); if (result) goto fail_flows; rtnl_lock(); result = ieee80211_init_rate_ctrl_alg(local, hw->rate_control_algorithm); rtnl_unlock(); if (result < 0) { wiphy_debug(local->hw.wiphy, "Failed to initialize rate control algorithm\n"); goto fail_rate; } if (local->rate_ctrl) { clear_bit(IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, hw->flags); if (local->rate_ctrl->ops->capa & RATE_CTRL_CAPA_VHT_EXT_NSS_BW) ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); } /* * If the VHT capabilities don't have IEEE80211_VHT_EXT_NSS_BW_CAPABLE, * or have it when we don't, copy the sband structure and set/clear it. * This is necessary because rate scaling algorithms could be switched * and have different support values. * Print a message so that in the common case the reallocation can be * avoided. 
*/ BUILD_BUG_ON(NUM_NL80211_BANDS > 8 * sizeof(local->sband_allocated)); for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband; bool local_cap, ie_cap; local_cap = ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW); sband = local->hw.wiphy->bands[band]; if (!sband || !sband->vht_cap.vht_supported) continue; ie_cap = !!(sband->vht_cap.vht_mcs.tx_highest & cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE)); if (local_cap == ie_cap) continue; sband = kmemdup(sband, sizeof(*sband), GFP_KERNEL); if (!sband) { result = -ENOMEM; goto fail_rate; } wiphy_dbg(hw->wiphy, "copying sband (band %d) due to VHT EXT NSS BW flag\n", band); sband->vht_cap.vht_mcs.tx_highest ^= cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); local->hw.wiphy->bands[band] = sband; local->sband_allocated |= BIT(band); } result = wiphy_register(local->hw.wiphy); if (result < 0) goto fail_wiphy_register; debugfs_hw_add(local); rate_control_add_debugfs(local); ieee80211_check_wbrf_support(local); rtnl_lock(); wiphy_lock(hw->wiphy); /* add one default STA interface if supported */ if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) && !ieee80211_hw_check(hw, NO_AUTO_VIF)) { struct vif_params params = {0}; result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL, NL80211_IFTYPE_STATION, ¶ms); if (result) wiphy_warn(local->hw.wiphy, "Failed to add default virtual iface\n"); } wiphy_unlock(hw->wiphy); rtnl_unlock(); #ifdef CONFIG_INET local->ifa_notifier.notifier_call = ieee80211_ifa_changed; result = register_inetaddr_notifier(&local->ifa_notifier); if (result) goto fail_ifa; #endif #if IS_ENABLED(CONFIG_IPV6) local->ifa6_notifier.notifier_call = ieee80211_ifa6_changed; result = register_inet6addr_notifier(&local->ifa6_notifier); if (result) goto fail_ifa6; #endif return 0; #if IS_ENABLED(CONFIG_IPV6) fail_ifa6: #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #endif #if defined(CONFIG_INET) || defined(CONFIG_IPV6) fail_ifa: #endif wiphy_unregister(local->hw.wiphy); fail_wiphy_register: rtnl_lock(); rate_control_deinitialize(local); ieee80211_remove_interfaces(local); rtnl_unlock(); fail_rate: ieee80211_txq_teardown_flows(local); fail_flows: ieee80211_led_exit(local); destroy_workqueue(local->workqueue); fail_workqueue: kfree(local->int_scan_req); return result; } EXPORT_SYMBOL(ieee80211_register_hw); void ieee80211_unregister_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); tasklet_kill(&local->tx_pending_tasklet); tasklet_kill(&local->tasklet); #ifdef CONFIG_INET unregister_inetaddr_notifier(&local->ifa_notifier); #endif #if IS_ENABLED(CONFIG_IPV6) unregister_inet6addr_notifier(&local->ifa6_notifier); #endif rtnl_lock(); /* * At this point, interface list manipulations are fine * because the driver cannot be handing us frames any * more and the tasklet is killed. 
*/ ieee80211_remove_interfaces(local); ieee80211_txq_teardown_flows(local); wiphy_lock(local->hw.wiphy); wiphy_delayed_work_cancel(local->hw.wiphy, &local->roc_work); wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter); wiphy_work_cancel(local->hw.wiphy, &local->sched_scan_stopped_work); wiphy_work_cancel(local->hw.wiphy, &local->radar_detected_work); wiphy_unlock(local->hw.wiphy); rtnl_unlock(); cancel_work_sync(&local->restart_work); ieee80211_clear_tx_pending(local); rate_control_deinitialize(local); if (skb_queue_len(&local->skb_queue) || skb_queue_len(&local->skb_queue_unreliable)) wiphy_warn(local->hw.wiphy, "skb_queue not empty\n"); skb_queue_purge(&local->skb_queue); skb_queue_purge(&local->skb_queue_unreliable); wiphy_unregister(local->hw.wiphy); destroy_workqueue(local->workqueue); ieee80211_led_exit(local); kfree(local->int_scan_req); } EXPORT_SYMBOL(ieee80211_unregister_hw); static int ieee80211_free_ack_frame(int id, void *p, void *data) { WARN_ONCE(1, "Have pending ack frames!\n"); kfree_skb(p); return 0; } void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); enum nl80211_band band; mutex_destroy(&local->iflist_mtx); idr_for_each(&local->ack_status_frames, ieee80211_free_ack_frame, NULL); idr_destroy(&local->ack_status_frames); sta_info_stop(local); ieee80211_free_led_names(local); for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!(local->sband_allocated & BIT(band))) continue; kfree(local->hw.wiphy->bands[band]); } wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); #define V(x) #x, static const char * const drop_reasons_unusable[] = { [0] = "RX_DROP_UNUSABLE", MAC80211_DROP_REASONS_UNUSABLE(V) #undef V }; static struct drop_reason_list drop_reason_list_unusable = { .reasons = drop_reasons_unusable, .n_reasons = ARRAY_SIZE(drop_reasons_unusable), }; static int __init ieee80211_init(void) { struct sk_buff *skb; int ret; BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb)); BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) + IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb)); ret = rc80211_minstrel_init(); if (ret) return ret; ret = ieee80211_iface_init(); if (ret) goto err_netdev; drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE, &drop_reason_list_unusable); return 0; err_netdev: rc80211_minstrel_exit(); return ret; } static void __exit ieee80211_exit(void) { rc80211_minstrel_exit(); ieee80211s_stop(); ieee80211_iface_exit(); drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE); rcu_barrier(); } subsys_initcall(ieee80211_init); module_exit(ieee80211_exit); MODULE_DESCRIPTION("IEEE 802.11 subsystem"); MODULE_LICENSE("GPL"); |
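/*
 * Illustrative sketch only -- not part of mac80211 and not a real driver.
 * It shows the intended call order for the entry points implemented above:
 * ieee80211_alloc_hw() -> ieee80211_register_hw() and, on teardown,
 * ieee80211_unregister_hw() -> ieee80211_free_hw().  The myhw_* names and
 * struct myhw_priv are hypothetical; "ops" stands for the driver's
 * ieee80211_ops, which per the checks in ieee80211_alloc_hw_nm() must
 * provide at least tx, start, stop, config, add_interface,
 * remove_interface, configure_filter and wake_tx_queue.
 */
#include <net/mac80211.h>

struct myhw_priv {
	void __iomem *regs;	/* hypothetical device state */
};

static int myhw_probe(struct device *dev, const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	int ret;

	/* one allocation holds wiphy + ieee80211_local + driver priv */
	hw = ieee80211_alloc_hw(sizeof(struct myhw_priv), ops);
	if (!hw)
		return -ENOMEM;

	SET_IEEE80211_DEV(hw, dev);
	ieee80211_hw_set(hw, SIGNAL_DBM);	/* signal reported in dBm */
	/* ... fill in wiphy bands, queues, permanent MAC address, etc. ... */

	ret = ieee80211_register_hw(hw);	/* may create a default wlan%d STA iface */
	if (ret) {
		ieee80211_free_hw(hw);
		return ret;
	}
	return 0;
}

static void myhw_remove(struct ieee80211_hw *hw)
{
	ieee80211_unregister_hw(hw);	/* removes interfaces, notifiers, workqueue */
	ieee80211_free_hw(hw);		/* frees wiphy, local and driver priv */
}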
/* SPDX-License-Identifier: LGPL-2.1+ */
/* Copyright (C) 2022 Kent Overstreet */

#ifndef _BCACHEFS_PRINTBUF_H
#define _BCACHEFS_PRINTBUF_H

/*
 * Printbufs: Simple strings for printing to, with optional heap allocation
 *
 * This code has provisions for use in userspace, to aid in making other code
 * portable between kernelspace and userspace.
 *
 * Basic example:
 *	struct printbuf buf = PRINTBUF;
 *
 *	prt_printf(&buf, "foo=");
 *	foo_to_text(&buf, foo);
 *	printk("%s", buf.buf);
 *	printbuf_exit(&buf);
 *
 * Or
 *	struct printbuf buf = PRINTBUF_EXTERN(char_buf, char_buf_size);
 *
 * We can now write pretty printers instead of writing code that dumps
 * everything to the kernel log buffer, and then those pretty-printers can be
 * used by other code that outputs to kernel log, sysfs, debugfs, etc.
 *
 * Memory allocation: Outputting to a printbuf may allocate memory. This
 * allocation is done with GFP_KERNEL, by default: use the newer
 * memalloc_*_(save|restore) functions as needed.
 *
 * Since no equivalent yet exists for GFP_ATOMIC/GFP_NOWAIT, memory allocations
 * will be done with GFP_NOWAIT if printbuf->atomic is nonzero.
 *
 * It's allowed to grab the output buffer and free it later with kfree()
 * instead of using printbuf_exit(), if the user just needs a heap allocated
 * string at the end.
 *
 * Memory allocation failures: We don't return errors directly, because on
 * memory allocation failure we usually don't want to bail out and unwind - we
 * want to print what we've got, on a best-effort basis. But code that does
 * want to return -ENOMEM may check printbuf.allocation_failure.
 *
 * Indenting, tabstops:
 *
 * To aid in writing multi-line pretty printers spread across multiple
 * functions, printbufs track the current indent level.
 *
 * printbuf_indent_push() and printbuf_indent_pop() increase and decrease the
 * current indent level, respectively.
 *
 * To use tabstops, set printbuf->tabstops[]; they are in units of spaces, from
 * start of line. Once set, prt_tab() will output spaces up to the next tabstop.
 * prt_tab_rjust() will also advance the current line of text up to the next
 * tabstop, but it does so by shifting text since the previous tabstop up to the
 * next tabstop - right justifying it.
 *
 * Make sure you use prt_newline() instead of \n in the format string for indent
 * level and tabstops to work correctly.
* * Output units: printbuf->units exists to tell pretty-printers how to output * numbers: a raw value (e.g. directly from a superblock field), as bytes, or as * human readable bytes. prt_units() obeys it. */ #include <linux/kernel.h> #include <linux/string.h> enum printbuf_si { PRINTBUF_UNITS_2, /* use binary powers of 2^10 */ PRINTBUF_UNITS_10, /* use powers of 10^3 (standard SI) */ }; #define PRINTBUF_INLINE_TABSTOPS 6 struct printbuf { char *buf; unsigned size; unsigned pos; unsigned last_newline; unsigned last_field; unsigned indent; /* * If nonzero, allocations will be done with GFP_ATOMIC: */ u8 atomic; bool allocation_failure:1; bool heap_allocated:1; bool overflow:1; enum printbuf_si si_units:1; bool human_readable_units:1; bool has_indent_or_tabstops:1; bool suppress_indent_tabstop_handling:1; u8 nr_tabstops; /* * Do not modify directly: use printbuf_tabstop_add(), * printbuf_tabstop_get() */ u8 cur_tabstop; u8 _tabstops[PRINTBUF_INLINE_TABSTOPS]; }; int bch2_printbuf_make_room(struct printbuf *, unsigned); __printf(2, 3) void bch2_prt_printf(struct printbuf *out, const char *fmt, ...); __printf(2, 0) void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list); const char *bch2_printbuf_str(const struct printbuf *); void bch2_printbuf_exit(struct printbuf *); void bch2_printbuf_tabstops_reset(struct printbuf *); void bch2_printbuf_tabstop_pop(struct printbuf *); int bch2_printbuf_tabstop_push(struct printbuf *, unsigned); void bch2_printbuf_indent_add(struct printbuf *, unsigned); void bch2_printbuf_indent_add_nextline(struct printbuf *, unsigned); void bch2_printbuf_indent_sub(struct printbuf *, unsigned); void bch2_prt_newline(struct printbuf *); void bch2_printbuf_strip_trailing_newline(struct printbuf *); void bch2_prt_tab(struct printbuf *); void bch2_prt_tab_rjust(struct printbuf *); void bch2_prt_bytes_indented(struct printbuf *, const char *, unsigned); void bch2_prt_human_readable_u64(struct printbuf *, u64); void bch2_prt_human_readable_s64(struct printbuf *, s64); void bch2_prt_units_u64(struct printbuf *, u64); void bch2_prt_units_s64(struct printbuf *, s64); void bch2_prt_string_option(struct printbuf *, const char * const[], size_t); void bch2_prt_bitflags(struct printbuf *, const char * const[], u64); void bch2_prt_bitflags_vector(struct printbuf *, const char * const[], unsigned long *, unsigned); /* Initializer for a heap allocated printbuf: */ #define PRINTBUF ((struct printbuf) { .heap_allocated = true }) /* Initializer a printbuf that points to an external buffer: */ #define PRINTBUF_EXTERN(_buf, _size) \ ((struct printbuf) { \ .buf = _buf, \ .size = _size, \ }) static inline struct printbuf bch2_printbuf_init(void) { return PRINTBUF; } DEFINE_CLASS(printbuf, struct printbuf, bch2_printbuf_exit(&_T), bch2_printbuf_init(), void) /* * Returns size remaining of output buffer: */ static inline unsigned printbuf_remaining_size(struct printbuf *out) { if (WARN_ON(out->size && out->pos >= out->size)) out->pos = out->size - 1; return out->size - out->pos; } /* * Returns number of characters we can print to the output buffer - i.e. * excluding the terminating nul: */ static inline unsigned printbuf_remaining(struct printbuf *out) { return out->size ? printbuf_remaining_size(out) - 1 : 0; } static inline unsigned printbuf_written(struct printbuf *out) { return out->size ? 
min(out->pos, out->size - 1) : 0; } static inline void printbuf_nul_terminate_reserved(struct printbuf *out) { if (WARN_ON(out->size && out->pos >= out->size)) out->pos = out->size - 1; if (out->size) out->buf[out->pos] = 0; } static inline void printbuf_nul_terminate(struct printbuf *out) { bch2_printbuf_make_room(out, 1); printbuf_nul_terminate_reserved(out); } /* Doesn't call bch2_printbuf_make_room(), doesn't nul terminate: */ static inline void __prt_char_reserved(struct printbuf *out, char c) { if (printbuf_remaining(out)) out->buf[out->pos++] = c; } /* Doesn't nul terminate: */ static inline void __prt_char(struct printbuf *out, char c) { bch2_printbuf_make_room(out, 1); __prt_char_reserved(out, c); } static inline void prt_char(struct printbuf *out, char c) { bch2_printbuf_make_room(out, 2); __prt_char_reserved(out, c); printbuf_nul_terminate_reserved(out); } static inline void __prt_chars_reserved(struct printbuf *out, char c, unsigned n) { unsigned can_print = min(n, printbuf_remaining(out)); for (unsigned i = 0; i < can_print; i++) out->buf[out->pos++] = c; } static inline void prt_chars(struct printbuf *out, char c, unsigned n) { bch2_printbuf_make_room(out, n); __prt_chars_reserved(out, c, n); printbuf_nul_terminate_reserved(out); } static inline void prt_bytes(struct printbuf *out, const void *b, unsigned n) { bch2_printbuf_make_room(out, n); unsigned can_print = min(n, printbuf_remaining(out)); for (unsigned i = 0; i < can_print; i++) out->buf[out->pos++] = ((char *) b)[i]; printbuf_nul_terminate(out); } static inline void prt_str(struct printbuf *out, const char *str) { prt_bytes(out, str, strlen(str)); } static inline void prt_str_indented(struct printbuf *out, const char *str) { bch2_prt_bytes_indented(out, str, strlen(str)); } static inline void prt_hex_byte(struct printbuf *out, u8 byte) { bch2_printbuf_make_room(out, 3); __prt_char_reserved(out, hex_asc_hi(byte)); __prt_char_reserved(out, hex_asc_lo(byte)); printbuf_nul_terminate_reserved(out); } static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte) { bch2_printbuf_make_room(out, 3); __prt_char_reserved(out, hex_asc_upper_hi(byte)); __prt_char_reserved(out, hex_asc_upper_lo(byte)); printbuf_nul_terminate_reserved(out); } static inline void printbuf_reset_keep_tabstops(struct printbuf *buf) { buf->pos = 0; buf->allocation_failure = 0; buf->last_newline = 0; buf->last_field = 0; buf->indent = 0; buf->cur_tabstop = 0; } /** * printbuf_reset - re-use a printbuf without freeing and re-initializing it: */ static inline void printbuf_reset(struct printbuf *buf) { printbuf_reset_keep_tabstops(buf); buf->nr_tabstops = 0; } /** * printbuf_atomic_inc - mark as entering an atomic section */ static inline void printbuf_atomic_inc(struct printbuf *buf) { buf->atomic++; } /** * printbuf_atomic_inc - mark as leaving an atomic section */ static inline void printbuf_atomic_dec(struct printbuf *buf) { buf->atomic--; } #endif /* _BCACHEFS_PRINTBUF_H */ |
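/*
 * Minimal usage sketch of the interface above, following the "Basic example"
 * from the header comment.  thing_to_text(), print_thing() and their
 * arguments are made up for illustration; only the printbuf calls come from
 * this header.
 */
static void thing_to_text(struct printbuf *out, int id, u64 bytes)
{
	bch2_prt_printf(out, "thing %i:", id);
	bch2_prt_newline(out);
	prt_str(out, "  size ");
	bch2_prt_human_readable_u64(out, bytes);	/* human-readable units */
	bch2_prt_newline(out);
}

static void print_thing(int id, u64 bytes)
{
	struct printbuf buf = PRINTBUF;	/* grows on the heap as needed */

	thing_to_text(&buf, id, bytes);
	printk("%s", buf.buf);
	bch2_printbuf_exit(&buf);	/* frees the heap allocation */
}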
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Task I/O accounting operations
 */
#ifndef __TASK_IO_ACCOUNTING_OPS_INCLUDED
#define __TASK_IO_ACCOUNTING_OPS_INCLUDED

#include <linux/sched.h>

#ifdef CONFIG_TASK_IO_ACCOUNTING
static inline void task_io_account_read(size_t bytes)
{
	current->ioac.read_bytes += bytes;
}

/*
 * We approximate number of blocks, because we account bytes only.
 * A 'block' is 512 bytes
 */
static inline unsigned long task_io_get_inblock(const struct task_struct *p)
{
	return p->ioac.read_bytes >> 9;
}

static inline void task_io_account_write(size_t bytes)
{
	current->ioac.write_bytes += bytes;
}

/*
 * We approximate number of blocks, because we account bytes only.
 * A 'block' is 512 bytes
 */
static inline unsigned long task_io_get_oublock(const struct task_struct *p)
{
	return p->ioac.write_bytes >> 9;
}

static inline void task_io_account_cancelled_write(size_t bytes)
{
	current->ioac.cancelled_write_bytes += bytes;
}

static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
	memset(ioac, 0, sizeof(*ioac));
}

static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
					      struct task_io_accounting *src)
{
	dst->read_bytes += src->read_bytes;
	dst->write_bytes += src->write_bytes;
	dst->cancelled_write_bytes += src->cancelled_write_bytes;
}

#else

static inline void task_io_account_read(size_t bytes)
{
}

static inline unsigned long task_io_get_inblock(const struct task_struct *p)
{
	return 0;
}

static inline void task_io_account_write(size_t bytes)
{
}

static inline unsigned long task_io_get_oublock(const struct task_struct *p)
{
	return 0;
}

static inline void task_io_account_cancelled_write(size_t bytes)
{
}

static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
}

static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
					      struct task_io_accounting *src)
{
}

#endif /* CONFIG_TASK_IO_ACCOUNTING */

#ifdef CONFIG_TASK_XACCT
static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
					      struct task_io_accounting *src)
{
	dst->rchar += src->rchar;
	dst->wchar += src->wchar;
	dst->syscr += src->syscr;
	dst->syscw += src->syscw;
}
#else
static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
					      struct task_io_accounting *src)
{
}
#endif /* CONFIG_TASK_XACCT */

static inline void task_io_accounting_add(struct task_io_accounting *dst,
					  struct task_io_accounting *src)
{
	task_chr_io_accounting_add(dst, src);
	task_blk_io_accounting_add(dst, src);
}
#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */
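/*
 * Usage illustration (hypothetical caller, not kernel code): a completed
 * 16 KiB read is accounted in bytes, and task_io_get_inblock() later
 * reports it as 512-byte blocks, i.e. 16384 >> 9 = 32.
 */
static inline void task_io_accounting_example(void)
{
	task_io_account_read(16384);		/* e.g. after reading 16 KiB */

	pr_debug("inblock so far: %lu\n",
		 task_io_get_inblock(current));	/* read_bytes >> 9 */
}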
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/block_validity.c
 *
 * Copyright (C) 2009
 * Theodore Ts'o (tytso@mit.edu)
 *
 * Track which blocks in the filesystem are metadata blocks that
 * should never be used as data blocks by files or directories.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "ext4.h"

struct ext4_system_zone {
	struct rb_node	node;
	ext4_fsblk_t	start_blk;
	unsigned int	count;
	u32		ino;
};

static struct kmem_cache *ext4_system_zone_cachep;

int __init ext4_init_system_zone(void)
{
	ext4_system_zone_cachep = KMEM_CACHE(ext4_system_zone, 0);
	if (ext4_system_zone_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_system_zone(void)
{
	rcu_barrier();
	kmem_cache_destroy(ext4_system_zone_cachep);
}

static inline int can_merge(struct ext4_system_zone *entry1,
			    struct ext4_system_zone *entry2)
{
	if ((entry1->start_blk + entry1->count) == entry2->start_blk &&
	    entry1->ino == entry2->ino)
		return 1;
	return 0;
}

static void release_system_zone(struct ext4_system_blocks *system_blks)
{
	struct ext4_system_zone	*entry, *n;

	rbtree_postorder_for_each_entry_safe(entry, n,
					     &system_blks->root, node)
		kmem_cache_free(ext4_system_zone_cachep, entry);
}

/*
 * Mark a range of blocks as belonging to the "system zone" --- that
 * is, filesystem metadata blocks which should never be used by
 * inodes.
*/ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, unsigned int count, u32 ino) { struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node; while (*n) { parent = *n; entry = rb_entry(parent, struct ext4_system_zone, node); if (start_blk < entry->start_blk) n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; else /* Unexpected overlap of system zones. */ return -EFSCORRUPTED; } new_entry = kmem_cache_alloc(ext4_system_zone_cachep, GFP_KERNEL); if (!new_entry) return -ENOMEM; new_entry->start_blk = start_blk; new_entry->count = count; new_entry->ino = ino; new_node = &new_entry->node; rb_link_node(new_node, parent, n); rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_system_zone, node); if (can_merge(entry, new_entry)) { new_entry->start_blk = entry->start_blk; new_entry->count += entry->count; rb_erase(node, &system_blks->root); kmem_cache_free(ext4_system_zone_cachep, entry); } } /* Can we merge to the right? */ node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_system_zone, node); if (can_merge(new_entry, entry)) { new_entry->count += entry->count; rb_erase(node, &system_blks->root); kmem_cache_free(ext4_system_zone_cachep, entry); } } return 0; } static void debug_print_tree(struct ext4_sb_info *sbi) { struct rb_node *node; struct ext4_system_zone *entry; struct ext4_system_blocks *system_blks; int first = 1; printk(KERN_INFO "System zones: "); rcu_read_lock(); system_blks = rcu_dereference(sbi->s_system_blks); node = rb_first(&system_blks->root); while (node) { entry = rb_entry(node, struct ext4_system_zone, node); printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ", entry->start_blk, entry->start_blk + entry->count - 1); first = 0; node = rb_next(node); } rcu_read_unlock(); printk(KERN_CONT "\n"); } static int ext4_protect_reserved_inode(struct super_block *sb, struct ext4_system_blocks *system_blks, u32 ino) { struct inode *inode; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_map_blocks map; u32 i = 0, num; int err = 0, n; if ((ino < EXT4_ROOT_INO) || (ino > le32_to_cpu(sbi->s_es->s_inodes_count))) return -EINVAL; inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL); if (IS_ERR(inode)) return PTR_ERR(inode); num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; while (i < num) { cond_resched(); map.m_lblk = i; map.m_len = num - i; n = ext4_map_blocks(NULL, inode, &map, 0); if (n < 0) { err = n; break; } if (n == 0) { i++; } else { err = add_system_zone(system_blks, map.m_pblk, n, ino); if (err < 0) { if (err == -EFSCORRUPTED) { EXT4_ERROR_INODE_ERR(inode, -err, "blocks %llu-%llu from inode overlap system zone", map.m_pblk, map.m_pblk + map.m_len - 1); } break; } i += n; } } iput(inode); return err; } static void ext4_destroy_system_zone(struct rcu_head *rcu) { struct ext4_system_blocks *system_blks; system_blks = container_of(rcu, struct ext4_system_blocks, rcu); release_system_zone(system_blks); kfree(system_blks); } /* * Build system zone rbtree which is used for block validity checking. * * The update of system_blks pointer in this function is protected by * sb->s_umount semaphore. However we have to be careful as we can be * racing with ext4_inode_block_valid() calls reading system_blks rbtree * protected only by RCU. 
That's why we first build the rbtree and then * swap it in place. */ int ext4_setup_system_zone(struct super_block *sb) { ext4_group_t ngroups = ext4_get_groups_count(sb); struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_system_blocks *system_blks; struct ext4_group_desc *gdp; ext4_group_t i; int ret; system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; for (i=0; i < ngroups; i++) { unsigned int meta_blks = ext4_num_base_meta_blocks(sb, i); cond_resched(); if (meta_blks != 0) { ret = add_system_zone(system_blks, ext4_group_first_block_no(sb, i), meta_blks, 0); if (ret) goto err; } gdp = ext4_get_group_desc(sb, i, NULL); ret = add_system_zone(system_blks, ext4_block_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, ext4_inode_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, ext4_inode_table(sb, gdp), sbi->s_itb_per_group, 0); if (ret) goto err; } if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) { ret = ext4_protect_reserved_inode(sb, system_blks, le32_to_cpu(sbi->s_es->s_journal_inum)); if (ret) goto err; } /* * System blks rbtree complete, announce it once to prevent racing * with ext4_inode_block_valid() accessing the rbtree at the same * time. */ rcu_assign_pointer(sbi->s_system_blks, system_blks); if (test_opt(sb, DEBUG)) debug_print_tree(sbi); return 0; err: release_system_zone(system_blks); kfree(system_blks); return ret; } /* * Called when the filesystem is unmounted or when remounting it with * noblock_validity specified. * * The update of system_blks pointer in this function is protected by * sb->s_umount semaphore. However we have to be careful as we can be * racing with ext4_inode_block_valid() calls reading system_blks rbtree * protected only by RCU. So we first clear the system_blks pointer and * then free the rbtree only after RCU grace period expires. */ void ext4_release_system_zone(struct super_block *sb) { struct ext4_system_blocks *system_blks; system_blks = rcu_dereference_protected(EXT4_SB(sb)->s_system_blks, lockdep_is_held(&sb->s_umount)); rcu_assign_pointer(EXT4_SB(sb)->s_system_blks, NULL); if (system_blks) call_rcu(&system_blks->rcu, ext4_destroy_system_zone); } int ext4_sb_block_valid(struct super_block *sb, struct inode *inode, ext4_fsblk_t start_blk, unsigned int count) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_system_blocks *system_blks; struct ext4_system_zone *entry; struct rb_node *n; int ret = 1; if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || (start_blk + count < start_blk) || (start_blk + count > ext4_blocks_count(sbi->s_es))) return 0; /* * Lock the system zone to prevent it being released concurrently * when doing a remount which inverse current "[no]block_validity" * mount option. */ rcu_read_lock(); system_blks = rcu_dereference(sbi->s_system_blks); if (system_blks == NULL) goto out_rcu; n = system_blks->root.rb_node; while (n) { entry = rb_entry(n, struct ext4_system_zone, node); if (start_blk + count - 1 < entry->start_blk) n = n->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = n->rb_right; else { ret = 0; if (inode) ret = (entry->ino == inode->i_ino); break; } } out_rcu: rcu_read_unlock(); return ret; } /* * Returns 1 if the passed-in block region (start_blk, * start_blk+count) is valid; 0 if some part of the block region * overlaps with some other filesystem metadata blocks. 
*/ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, unsigned int count) { return ext4_sb_block_valid(inode->i_sb, inode, start_blk, count); } int ext4_check_blockref(const char *function, unsigned int line, struct inode *inode, __le32 *p, unsigned int max) { __le32 *bref = p; unsigned int blk; journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; if (journal && inode == journal->j_inode) return 0; while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && unlikely(!ext4_inode_block_valid(inode, blk, 1))) { ext4_error_inode(inode, function, line, blk, "invalid block"); return -EFSCORRUPTED; } } return 0; } |
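/*
 * Not ext4 code -- just a standalone restatement of the overlap rule that the
 * rbtree walk in ext4_sb_block_valid() applies at each node: an extent
 * [start, start + count) is rejected once it intersects a system zone
 * [z_start, z_start + z_count) (unless the zone belongs to the same inode,
 * as in the journal-inode case).  For example, start=100, count=8 hits a
 * zone with z_start=104, z_count=4, because blocks 104-107 are shared.
 */
static bool range_hits_zone(unsigned long long start, unsigned int count,
			    unsigned long long z_start, unsigned int z_count)
{
	if (start + count - 1 < z_start)	/* extent ends before the zone */
		return false;
	if (start >= z_start + z_count)		/* extent starts after the zone */
		return false;
	return true;				/* some block lies in both ranges */
}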
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds * * This file contains the interface functions for the various time related * system calls: time, stime, gettimeofday, settimeofday, adjtime * * Modification history: * * 1993-09-02 Philip Gladstone * Created file with time related functions from sched/core.c and adjtimex() * 1993-10-08 Torsten Duwe * adjtime interface update and CMOS clock write code * 1995-08-13 Torsten Duwe * kernel PLL updated to 1994-12-13 specs (rfc-1589) * 1999-01-16 Ulrich Windl * Introduced error checking for many cases in adjtimex(). * Updated NTP code according to technical memorandum Jan '96 * "A Kernel Model for Precision Timekeeping" by Dave Mills * Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10) * (Even though the technical memorandum forbids it) * 2004-07-14 Christoph Lameter * Added getnstimeofday to allow the posix timer functions to return * with nanosecond accuracy */ #include <linux/export.h> #include <linux/kernel.h> #include <linux/timex.h> #include <linux/capability.h> #include <linux/timekeeper_internal.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <linux/security.h> #include <linux/fs.h> #include <linux/math64.h> #include <linux/ptrace.h> #include <linux/uaccess.h> #include <linux/compat.h> #include <asm/unistd.h> #include <generated/timeconst.h> #include "timekeeping.h" /* * The timezone where the local system is located. Used as a default by some * programs who obtain this value by using gettimeofday. */ struct timezone sys_tz; EXPORT_SYMBOL(sys_tz); #ifdef __ARCH_WANT_SYS_TIME /* * sys_time() can be implemented in user-level using * sys_gettimeofday(). Is this for backwards compatibility? If so, * why not move it into the appropriate arch directory (for those * architectures that need it). */ SYSCALL_DEFINE1(time, __kernel_old_time_t __user *, tloc) { __kernel_old_time_t i = (__kernel_old_time_t)ktime_get_real_seconds(); if (tloc) { if (put_user(i,tloc)) return -EFAULT; } force_successful_syscall_return(); return i; } /* * sys_stime() can be implemented in user-level using * sys_settimeofday(). Is this for backwards compatibility? If so, * why not move it into the appropriate arch directory (for those * architectures that need it). */ SYSCALL_DEFINE1(stime, __kernel_old_time_t __user *, tptr) { struct timespec64 tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime64(&tv, NULL); if (err) return err; do_settimeofday64(&tv); return 0; } #endif /* __ARCH_WANT_SYS_TIME */ #ifdef CONFIG_COMPAT_32BIT_TIME #ifdef __ARCH_WANT_SYS_TIME32 /* old_time32_t is a 32 bit "long" and needs to get converted.
*/ SYSCALL_DEFINE1(time32, old_time32_t __user *, tloc) { old_time32_t i; i = (old_time32_t)ktime_get_real_seconds(); if (tloc) { if (put_user(i,tloc)) return -EFAULT; } force_successful_syscall_return(); return i; } SYSCALL_DEFINE1(stime32, old_time32_t __user *, tptr) { struct timespec64 tv; int err; if (get_user(tv.tv_sec, tptr)) return -EFAULT; tv.tv_nsec = 0; err = security_settime64(&tv, NULL); if (err) return err; do_settimeofday64(&tv); return 0; } #endif /* __ARCH_WANT_SYS_TIME32 */ #endif SYSCALL_DEFINE2(gettimeofday, struct __kernel_old_timeval __user *, tv, struct timezone __user *, tz) { if (likely(tv != NULL)) { struct timespec64 ts; ktime_get_real_ts64(&ts); if (put_user(ts.tv_sec, &tv->tv_sec) || put_user(ts.tv_nsec / 1000, &tv->tv_usec)) return -EFAULT; } if (unlikely(tz != NULL)) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } /* * In case for some reason the CMOS clock has not already been running * in UTC, but in some local time: The first time we set the timezone, * we will warp the clock so that it is ticking UTC time instead of * local time. Presumably, if someone is setting the timezone then we * are running in an environment where the programs understand about * timezones. This should be done at boot time in the /etc/rc script, * as soon as possible, so that the clock can be set right. Otherwise, * various programs will get confused when the clock gets warped. */ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz) { static int firsttime = 1; int error = 0; if (tv && !timespec64_valid_settod(tv)) return -EINVAL; error = security_settime64(tv, tz); if (error) return error; if (tz) { /* Verify we're within the +-15 hrs range */ if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60) return -EINVAL; sys_tz = *tz; update_vsyscall_tz(); if (firsttime) { firsttime = 0; if (!tv) timekeeping_warp_clock(); } } if (tv) return do_settimeofday64(tv); return 0; } SYSCALL_DEFINE2(settimeofday, struct __kernel_old_timeval __user *, tv, struct timezone __user *, tz) { struct timespec64 new_ts; struct timezone new_tz; if (tv) { if (get_user(new_ts.tv_sec, &tv->tv_sec) || get_user(new_ts.tv_nsec, &tv->tv_usec)) return -EFAULT; if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0) return -EINVAL; new_ts.tv_nsec *= NSEC_PER_USEC; } if (tz) { if (copy_from_user(&new_tz, tz, sizeof(*tz))) return -EFAULT; } return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv, struct timezone __user *, tz) { if (tv) { struct timespec64 ts; ktime_get_real_ts64(&ts); if (put_user(ts.tv_sec, &tv->tv_sec) || put_user(ts.tv_nsec / 1000, &tv->tv_usec)) return -EFAULT; } if (tz) { if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) return -EFAULT; } return 0; } COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv, struct timezone __user *, tz) { struct timespec64 new_ts; struct timezone new_tz; if (tv) { if (get_user(new_ts.tv_sec, &tv->tv_sec) || get_user(new_ts.tv_nsec, &tv->tv_usec)) return -EFAULT; if (new_ts.tv_nsec > USEC_PER_SEC || new_ts.tv_nsec < 0) return -EINVAL; new_ts.tv_nsec *= NSEC_PER_USEC; } if (tz) { if (copy_from_user(&new_tz, tz, sizeof(*tz))) return -EFAULT; } return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? 
&new_tz : NULL); } #endif #ifdef CONFIG_64BIT SYSCALL_DEFINE1(adjtimex, struct __kernel_timex __user *, txc_p) { struct __kernel_timex txc; /* Local copy of parameter */ int ret; /* Copy the user data space into the kernel copy * structure. But bear in mind that the structures * may change */ if (copy_from_user(&txc, txc_p, sizeof(struct __kernel_timex))) return -EFAULT; ret = do_adjtimex(&txc); return copy_to_user(txc_p, &txc, sizeof(struct __kernel_timex)) ? -EFAULT : ret; } #endif #ifdef CONFIG_COMPAT_32BIT_TIME int get_old_timex32(struct __kernel_timex *txc, const struct old_timex32 __user *utp) { struct old_timex32 tx32; memset(txc, 0, sizeof(struct __kernel_timex)); if (copy_from_user(&tx32, utp, sizeof(struct old_timex32))) return -EFAULT; txc->modes = tx32.modes; txc->offset = tx32.offset; txc->freq = tx32.freq; txc->maxerror = tx32.maxerror; txc->esterror = tx32.esterror; txc->status = tx32.status; txc->constant = tx32.constant; txc->precision = tx32.precision; txc->tolerance = tx32.tolerance; txc->time.tv_sec = tx32.time.tv_sec; txc->time.tv_usec = tx32.time.tv_usec; txc->tick = tx32.tick; txc->ppsfreq = tx32.ppsfreq; txc->jitter = tx32.jitter; txc->shift = tx32.shift; txc->stabil = tx32.stabil; txc->jitcnt = tx32.jitcnt; txc->calcnt = tx32.calcnt; txc->errcnt = tx32.errcnt; txc->stbcnt = tx32.stbcnt; return 0; } int put_old_timex32(struct old_timex32 __user *utp, const struct __kernel_timex *txc) { struct old_timex32 tx32; memset(&tx32, 0, sizeof(struct old_timex32)); tx32.modes = txc->modes; tx32.offset = txc->offset; tx32.freq = txc->freq; tx32.maxerror = txc->maxerror; tx32.esterror = txc->esterror; tx32.status = txc->status; tx32.constant = txc->constant; tx32.precision = txc->precision; tx32.tolerance = txc->tolerance; tx32.time.tv_sec = txc->time.tv_sec; tx32.time.tv_usec = txc->time.tv_usec; tx32.tick = txc->tick; tx32.ppsfreq = txc->ppsfreq; tx32.jitter = txc->jitter; tx32.shift = txc->shift; tx32.stabil = txc->stabil; tx32.jitcnt = txc->jitcnt; tx32.calcnt = txc->calcnt; tx32.errcnt = txc->errcnt; tx32.stbcnt = txc->stbcnt; tx32.tai = txc->tai; if (copy_to_user(utp, &tx32, sizeof(struct old_timex32))) return -EFAULT; return 0; } SYSCALL_DEFINE1(adjtimex_time32, struct old_timex32 __user *, utp) { struct __kernel_timex txc; int err, ret; err = get_old_timex32(&txc, utp); if (err) return err; ret = do_adjtimex(&txc); err = put_old_timex32(utp, &txc); if (err) return err; return ret; } #endif /** * jiffies_to_msecs - Convert jiffies to milliseconds * @j: jiffies value * * Avoid unnecessary multiplications/divisions in the * two most common HZ cases. * * Return: milliseconds value */ unsigned int jiffies_to_msecs(const unsigned long j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) return (MSEC_PER_SEC / HZ) * j; #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); #else # if BITS_PER_LONG == 32 return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >> HZ_TO_MSEC_SHR32; # else return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); # endif #endif } EXPORT_SYMBOL(jiffies_to_msecs); /** * jiffies_to_usecs - Convert jiffies to microseconds * @j: jiffies value * * Return: microseconds value */ unsigned int jiffies_to_usecs(const unsigned long j) { /* * Hz usually doesn't go much further MSEC_PER_SEC. * jiffies_to_usecs() and usecs_to_jiffies() depend on that. 
*/ BUILD_BUG_ON(HZ > USEC_PER_SEC); #if !(USEC_PER_SEC % HZ) return (USEC_PER_SEC / HZ) * j; #else # if BITS_PER_LONG == 32 return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; # else return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; # endif #endif } EXPORT_SYMBOL(jiffies_to_usecs); /** * mktime64 - Converts date to seconds. * @year0: year to convert * @mon0: month to convert * @day: day to convert * @hour: hour to convert * @min: minute to convert * @sec: second to convert * * Converts Gregorian date to seconds since 1970-01-01 00:00:00. * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. * * [For the Julian calendar (which was used in Russia before 1917, * Britain & colonies before 1752, anywhere else before 1582, * and is still in use by some communities) leave out the * -year/100+year/400 terms, and add 10.] * * This algorithm was first published by Gauss (I think). * * A leap second can be indicated by calling this function with sec as * 60 (allowable under ISO 8601). The leap second is treated the same * as the following second since they don't exist in UNIX time. * * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight * tomorrow - (allowable under ISO 8601) is supported. * * Return: seconds since the epoch time for the given input date */ time64_t mktime64(const unsigned int year0, const unsigned int mon0, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec) { unsigned int mon = mon0, year = year0; /* 1..12 -> 11,12,1..10 */ if (0 >= (int) (mon -= 2)) { mon += 12; /* Puts Feb last since it has leap day */ year -= 1; } return ((((time64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + year*365 - 719499 )*24 + hour /* now have hours - midnight tomorrow handled here */ )*60 + min /* now have minutes */ )*60 + sec; /* finally seconds */ } EXPORT_SYMBOL(mktime64); struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec) { struct timespec64 ts = ns_to_timespec64(nsec); struct __kernel_old_timeval tv; tv.tv_sec = ts.tv_sec; tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000; return tv; } EXPORT_SYMBOL(ns_to_kernel_old_timeval); /** * set_normalized_timespec64 - set timespec sec and nsec parts and normalize * * @ts: pointer to timespec variable to be set * @sec: seconds to set * @nsec: nanoseconds to set * * Set seconds and nanoseconds field of a timespec variable and * normalize to the timespec storage format * * Note: The tv_nsec part is always in the range of 0 <= tv_nsec < NSEC_PER_SEC. * For negative values only the tv_sec field is negative ! */ void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) { while (nsec >= NSEC_PER_SEC) { /* * The following asm() prevents the compiler from * optimising this loop into a modulo operation. See * also __iter_div_u64_rem() in include/linux/time.h */ asm("" : "+rm"(nsec)); nsec -= NSEC_PER_SEC; ++sec; } while (nsec < 0) { asm("" : "+rm"(nsec)); nsec += NSEC_PER_SEC; --sec; } ts->tv_sec = sec; ts->tv_nsec = nsec; } EXPORT_SYMBOL(set_normalized_timespec64); /** * ns_to_timespec64 - Convert nanoseconds to timespec64 * @nsec: the nanoseconds value to be converted * * Return: the timespec64 representation of the nsec parameter. 
*/ struct timespec64 ns_to_timespec64(s64 nsec) { struct timespec64 ts = { 0, 0 }; s32 rem; if (likely(nsec > 0)) { ts.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); ts.tv_nsec = rem; } else if (nsec < 0) { /* * With negative times, tv_sec points to the earlier * second, and tv_nsec counts the nanoseconds since * then, so tv_nsec is always a positive number. */ ts.tv_sec = -div_u64_rem(-nsec - 1, NSEC_PER_SEC, &rem) - 1; ts.tv_nsec = NSEC_PER_SEC - rem - 1; } return ts; } EXPORT_SYMBOL(ns_to_timespec64); /** * __msecs_to_jiffies: - convert milliseconds to jiffies * @m: time in milliseconds * * conversion is done as follows: * * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) * * - 'too large' values [that would result in larger than * MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. * * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. * for the details see _msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the * code, __msecs_to_jiffies() is called if the value passed does not * allow constant folding and the actual conversion must be done at * runtime. * The _msecs_to_jiffies helpers are the HZ dependent conversion * routines found in include/linux/jiffies.h * * Return: jiffies value */ unsigned long __msecs_to_jiffies(const unsigned int m) { /* * Negative value, means infinite timeout: */ if ((int)m < 0) return MAX_JIFFY_OFFSET; return _msecs_to_jiffies(m); } EXPORT_SYMBOL(__msecs_to_jiffies); /** * __usecs_to_jiffies: - convert microseconds to jiffies * @u: time in milliseconds * * Return: jiffies value */ unsigned long __usecs_to_jiffies(const unsigned int u) { if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) return MAX_JIFFY_OFFSET; return _usecs_to_jiffies(u); } EXPORT_SYMBOL(__usecs_to_jiffies); /** * timespec64_to_jiffies - convert a timespec64 value to jiffies * @value: pointer to &struct timespec64 * * The TICK_NSEC - 1 rounds up the value to the next resolution. Note * that a remainder subtract here would not do the right thing as the * resolution values don't fall on second boundaries. I.e. the line: * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. * Note that due to the small error in the multiplier here, this * rounding is incorrect for sufficiently large values of tv_nsec, but * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're * OK. * * Rather, we just shift the bits off the right. * * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec * value to a scaled second value. * * Return: jiffies value */ unsigned long timespec64_to_jiffies(const struct timespec64 *value) { u64 sec = value->tv_sec; long nsec = value->tv_nsec + TICK_NSEC - 1; if (sec >= MAX_SEC_IN_JIFFIES){ sec = MAX_SEC_IN_JIFFIES; nsec = 0; } return ((sec * SEC_CONVERSION) + (((u64)nsec * NSEC_CONVERSION) >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; } EXPORT_SYMBOL(timespec64_to_jiffies); /** * jiffies_to_timespec64 - convert jiffies value to &struct timespec64 * @jiffies: jiffies value * @value: pointer to &struct timespec64 */ void jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value) { /* * Convert jiffies to nanoseconds and separate with * one divide. 
*/ u32 rem; value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC, NSEC_PER_SEC, &rem); value->tv_nsec = rem; } EXPORT_SYMBOL(jiffies_to_timespec64); /* * Convert jiffies/jiffies_64 to clock_t and back. */ /** * jiffies_to_clock_t - Convert jiffies to clock_t * @x: jiffies value * * Return: jiffies converted to clock_t (CLOCKS_PER_SEC) */ clock_t jiffies_to_clock_t(unsigned long x) { #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 # if HZ < USER_HZ return x * (USER_HZ / HZ); # else return x / (HZ / USER_HZ); # endif #else return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ); #endif } EXPORT_SYMBOL(jiffies_to_clock_t); /** * clock_t_to_jiffies - Convert clock_t to jiffies * @x: clock_t value * * Return: clock_t value converted to jiffies */ unsigned long clock_t_to_jiffies(unsigned long x) { #if (HZ % USER_HZ)==0 if (x >= ~0UL / (HZ / USER_HZ)) return ~0UL; return x * (HZ / USER_HZ); #else /* Don't worry about loss of precision here .. */ if (x >= ~0UL / HZ * USER_HZ) return ~0UL; /* .. but do try to contain it here */ return div_u64((u64)x * HZ, USER_HZ); #endif } EXPORT_SYMBOL(clock_t_to_jiffies); /** * jiffies_64_to_clock_t - Convert jiffies_64 to clock_t * @x: jiffies_64 value * * Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC) */ u64 jiffies_64_to_clock_t(u64 x) { #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0 # if HZ < USER_HZ x = div_u64(x * USER_HZ, HZ); # elif HZ > USER_HZ x = div_u64(x, HZ / USER_HZ); # else /* Nothing to do */ # endif #else /* * There are better ways that don't overflow early, * but even this doesn't overflow in hundreds of years * in 64 bits, so.. */ x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ)); #endif return x; } EXPORT_SYMBOL(jiffies_64_to_clock_t); /** * nsec_to_clock_t - Convert nsec value to clock_t * @x: nsec value * * Return: nsec value converted to 64-bit "clock_t" (CLOCKS_PER_SEC) */ u64 nsec_to_clock_t(u64 x) { #if (NSEC_PER_SEC % USER_HZ) == 0 return div_u64(x, NSEC_PER_SEC / USER_HZ); #elif (USER_HZ % 512) == 0 return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512); #else /* * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024, * overflow after 64.99 years. * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ... */ return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ); #endif } /** * jiffies64_to_nsecs - Convert jiffies64 to nanoseconds * @j: jiffies64 value * * Return: nanoseconds value */ u64 jiffies64_to_nsecs(u64 j) { #if !(NSEC_PER_SEC % HZ) return (NSEC_PER_SEC / HZ) * j; # else return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN); #endif } EXPORT_SYMBOL(jiffies64_to_nsecs); /** * jiffies64_to_msecs - Convert jiffies64 to milliseconds * @j: jiffies64 value * * Return: milliseconds value */ u64 jiffies64_to_msecs(const u64 j) { #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) return (MSEC_PER_SEC / HZ) * j; #else return div_u64(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN); #endif } EXPORT_SYMBOL(jiffies64_to_msecs); /** * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64 * * @n: nsecs in u64 * * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. * And this doesn't return MAX_JIFFY_OFFSET since this function is designed * for scheduler, not for use in device drivers to calculate timeout value. 
* * note: * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years * * Return: nsecs converted to jiffies64 value */ u64 nsecs_to_jiffies64(u64 n) { #if (NSEC_PER_SEC % HZ) == 0 /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */ return div_u64(n, NSEC_PER_SEC / HZ); #elif (HZ % 512) == 0 /* overflow after 292 years if HZ = 1024 */ return div_u64(n * HZ / 512, NSEC_PER_SEC / 512); #else /* * Generic case - optimized for cases where HZ is a multiple of 3. * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc. */ return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); #endif } EXPORT_SYMBOL(nsecs_to_jiffies64); /** * nsecs_to_jiffies - Convert nsecs in u64 to jiffies * * @n: nsecs in u64 * * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64. * And this doesn't return MAX_JIFFY_OFFSET since this function is designed * for scheduler, not for use in device drivers to calculate timeout value. * * note: * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512) * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years * * Return: nsecs converted to jiffies value */ unsigned long nsecs_to_jiffies(u64 n) { return (unsigned long)nsecs_to_jiffies64(n); } EXPORT_SYMBOL_GPL(nsecs_to_jiffies); /** * timespec64_add_safe - Add two timespec64 values and do a safety check * for overflow. * @lhs: first (left) timespec64 to add * @rhs: second (right) timespec64 to add * * It's assumed that both values are valid (>= 0). * And, each timespec64 is in normalized form. * * Return: sum of @lhs + @rhs */ struct timespec64 timespec64_add_safe(const struct timespec64 lhs, const struct timespec64 rhs) { struct timespec64 res; set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec, lhs.tv_nsec + rhs.tv_nsec); if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) { res.tv_sec = TIME64_MAX; res.tv_nsec = 0; } return res; } /** * get_timespec64 - get user's time value into kernel space * @ts: destination &struct timespec64 * @uts: user's time value as &struct __kernel_timespec * * Handles compat or 32-bit modes. * * Return: 0 on success or negative errno on error */ int get_timespec64(struct timespec64 *ts, const struct __kernel_timespec __user *uts) { struct __kernel_timespec kts; int ret; ret = copy_from_user(&kts, uts, sizeof(kts)); if (ret) return -EFAULT; ts->tv_sec = kts.tv_sec; /* Zero out the padding in compat mode */ if (in_compat_syscall()) kts.tv_nsec &= 0xFFFFFFFFUL; /* In 32-bit mode, this drops the padding */ ts->tv_nsec = kts.tv_nsec; return 0; } EXPORT_SYMBOL_GPL(get_timespec64); /** * put_timespec64 - convert timespec64 value to __kernel_timespec format and * copy the latter to userspace * @ts: input &struct timespec64 * @uts: user's &struct __kernel_timespec * * Return: 0 on success or negative errno on error */ int put_timespec64(const struct timespec64 *ts, struct __kernel_timespec __user *uts) { struct __kernel_timespec kts = { .tv_sec = ts->tv_sec, .tv_nsec = ts->tv_nsec }; return copy_to_user(uts, &kts, sizeof(kts)) ? 
-EFAULT : 0; } EXPORT_SYMBOL_GPL(put_timespec64); static int __get_old_timespec32(struct timespec64 *ts64, const struct old_timespec32 __user *cts) { struct old_timespec32 ts; int ret; ret = copy_from_user(&ts, cts, sizeof(ts)); if (ret) return -EFAULT; ts64->tv_sec = ts.tv_sec; ts64->tv_nsec = ts.tv_nsec; return 0; } static int __put_old_timespec32(const struct timespec64 *ts64, struct old_timespec32 __user *cts) { struct old_timespec32 ts = { .tv_sec = ts64->tv_sec, .tv_nsec = ts64->tv_nsec }; return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0; } /** * get_old_timespec32 - get user's old-format time value into kernel space * @ts: destination &struct timespec64 * @uts: user's old-format time value (&struct old_timespec32) * * Handles X86_X32_ABI compatibility conversion. * * Return: 0 on success or negative errno on error */ int get_old_timespec32(struct timespec64 *ts, const void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0; else return __get_old_timespec32(ts, uts); } EXPORT_SYMBOL_GPL(get_old_timespec32); /** * put_old_timespec32 - convert timespec64 value to &struct old_timespec32 and * copy the latter to userspace * @ts: input &struct timespec64 * @uts: user's &struct old_timespec32 * * Handles X86_X32_ABI compatibility conversion. * * Return: 0 on success or negative errno on error */ int put_old_timespec32(const struct timespec64 *ts, void __user *uts) { if (COMPAT_USE_64BIT_TIME) return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0; else return __put_old_timespec32(ts, uts); } EXPORT_SYMBOL_GPL(put_old_timespec32); /** * get_itimerspec64 - get user's &struct __kernel_itimerspec into kernel space * @it: destination &struct itimerspec64 * @uit: user's &struct __kernel_itimerspec * * Return: 0 on success or negative errno on error */ int get_itimerspec64(struct itimerspec64 *it, const struct __kernel_itimerspec __user *uit) { int ret; ret = get_timespec64(&it->it_interval, &uit->it_interval); if (ret) return ret; ret = get_timespec64(&it->it_value, &uit->it_value); return ret; } EXPORT_SYMBOL_GPL(get_itimerspec64); /** * put_itimerspec64 - convert &struct itimerspec64 to __kernel_itimerspec format * and copy the latter to userspace * @it: input &struct itimerspec64 * @uit: user's &struct __kernel_itimerspec * * Return: 0 on success or negative errno on error */ int put_itimerspec64(const struct itimerspec64 *it, struct __kernel_itimerspec __user *uit) { int ret; ret = put_timespec64(&it->it_interval, &uit->it_interval); if (ret) return ret; ret = put_timespec64(&it->it_value, &uit->it_value); return ret; } EXPORT_SYMBOL_GPL(put_itimerspec64); /** * get_old_itimerspec32 - get user's &struct old_itimerspec32 into kernel space * @its: destination &struct itimerspec64 * @uits: user's &struct old_itimerspec32 * * Return: 0 on success or negative errno on error */ int get_old_itimerspec32(struct itimerspec64 *its, const struct old_itimerspec32 __user *uits) { if (__get_old_timespec32(&its->it_interval, &uits->it_interval) || __get_old_timespec32(&its->it_value, &uits->it_value)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(get_old_itimerspec32); /** * put_old_itimerspec32 - convert &struct itimerspec64 to &struct * old_itimerspec32 and copy the latter to userspace * @its: input &struct itimerspec64 * @uits: user's &struct old_itimerspec32 * * Return: 0 on success or negative errno on error */ int put_old_itimerspec32(const struct itimerspec64 *its, struct old_itimerspec32 __user *uits) { if 
(__put_old_timespec32(&its->it_interval, &uits->it_interval) || __put_old_timespec32(&its->it_value, &uits->it_value)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(put_old_itimerspec32);
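/*
 * Editor's illustrative sketch (not part of the kernel sources): a stand-alone
 * user-space copy of the Gauss-style date-to-seconds conversion used by
 * mktime64() above, checked against the libc timegm(). The name demo_mktime64
 * is invented for this example; the arithmetic mirrors the kernel routine
 * (months are rotated so February lands at the end of the "year", which makes
 * the leap-day handling fall out of the year arithmetic).
 */
#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t demo_mktime64(unsigned int year0, unsigned int mon0,
			     unsigned int day, unsigned int hour,
			     unsigned int min, unsigned int sec)
{
	unsigned int mon = mon0, year = year0;

	/* Shift 1..12 -> 11,12,1..10 so the leap day is the last day of "year". */
	if ((int)(mon -= 2) <= 0) {
		mon += 12;
		year -= 1;
	}

	return ((((int64_t)(year / 4 - year / 100 + year / 400 +
			    367 * mon / 12 + day) +
		  year * 365 - 719499) * 24 + hour) * 60 + min) * 60 + sec;
}

int main(void)
{
	struct tm tm = { .tm_year = 2024 - 1900, .tm_mon = 2 - 1, .tm_mday = 29,
			 .tm_hour = 12, .tm_min = 34, .tm_sec = 56 };

	printf("demo_mktime64: %lld\n",
	       (long long)demo_mktime64(2024, 2, 29, 12, 34, 56));
	printf("timegm:        %lld\n", (long long)timegm(&tm));
	return 0;
}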
// SPDX-License-Identifier: GPL-2.0 /* * * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. * * Regular file handling primitives for NTFS-based filesystems.
* */ #include <linux/backing-dev.h> #include <linux/blkdev.h> #include <linux/buffer_head.h> #include <linux/compat.h> #include <linux/falloc.h> #include <linux/fiemap.h> #include <linux/fileattr.h> #include "debug.h" #include "ntfs.h" #include "ntfs_fs.h" static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg) { struct fstrim_range __user *user_range; struct fstrim_range range; struct block_device *dev; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; dev = sbi->sb->s_bdev; if (!bdev_max_discard_sectors(dev)) return -EOPNOTSUPP; user_range = (struct fstrim_range __user *)arg; if (copy_from_user(&range, user_range, sizeof(range))) return -EFAULT; range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev)); err = ntfs_trim_fs(sbi, &range); if (err < 0) return err; if (copy_to_user(user_range, &range, sizeof(range))) return -EFAULT; return 0; } /* * ntfs_ioctl - file_operations::unlocked_ioctl */ long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg) { struct inode *inode = file_inode(filp); struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; /* Avoid any operation if inode is bad. */ if (unlikely(is_bad_ni(ntfs_i(inode)))) return -EINVAL; switch (cmd) { case FITRIM: return ntfs_ioctl_fitrim(sbi, arg); } return -ENOTTY; /* Inappropriate ioctl for device. */ } #ifdef CONFIG_COMPAT long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg) { return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); } #endif /* * ntfs_getattr - inode_operations::getattr */ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, u32 flags) { struct inode *inode = d_inode(path->dentry); struct ntfs_inode *ni = ntfs_i(inode); /* Avoid any operation if inode is bad. */ if (unlikely(is_bad_ni(ni))) return -EINVAL; stat->result_mask |= STATX_BTIME; stat->btime = ni->i_crtime; stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */ if (inode->i_flags & S_IMMUTABLE) stat->attributes |= STATX_ATTR_IMMUTABLE; if (inode->i_flags & S_APPEND) stat->attributes |= STATX_ATTR_APPEND; if (is_compressed(ni)) stat->attributes |= STATX_ATTR_COMPRESSED; if (is_encrypted(ni)) stat->attributes |= STATX_ATTR_ENCRYPTED; stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND; generic_fillattr(idmap, request_mask, inode, stat); return 0; } static int ntfs_extend_initialized_size(struct file *file, struct ntfs_inode *ni, const loff_t valid, const loff_t new_valid) { struct inode *inode = &ni->vfs_inode; struct address_space *mapping = inode->i_mapping; struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; loff_t pos = valid; int err; if (valid >= new_valid) return 0; if (is_resident(ni)) { ni->i_valid = new_valid; return 0; } WARN_ON(is_compressed(ni)); for (;;) { u32 zerofrom, len; struct folio *folio; u8 bits; CLST vcn, lcn, clen; if (is_sparsed(ni)) { bits = sbi->cluster_bits; vcn = pos >> bits; err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, false); if (err) goto out; if (lcn == SPARSE_LCN) { pos = ((loff_t)clen + vcn) << bits; ni->i_valid = pos; goto next; } } zerofrom = pos & (PAGE_SIZE - 1); len = PAGE_SIZE - zerofrom; if (pos + len > new_valid) len = new_valid - pos; err = ntfs_write_begin(NULL, mapping, pos, len, &folio, NULL); if (err) goto out; folio_zero_range(folio, zerofrom, folio_size(folio) - zerofrom); err = ntfs_write_end(NULL, mapping, pos, len, len, folio, NULL); if (err < 0) goto out; pos += len; next: if (pos >= new_valid) break; 
balance_dirty_pages_ratelimited(mapping); cond_resched(); } return 0; out: ni->i_valid = valid; ntfs_inode_warn(inode, "failed to extend initialized size to %llx.", new_valid); return err; } /* * ntfs_zero_range - Helper function for punch_hole. * * It zeroes a range [vbo, vbo_to). */ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) { int err = 0; struct address_space *mapping = inode->i_mapping; u32 blocksize = i_blocksize(inode); pgoff_t idx = vbo >> PAGE_SHIFT; u32 from = vbo & (PAGE_SIZE - 1); pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT; loff_t page_off; struct buffer_head *head, *bh; u32 bh_next, bh_off, to; sector_t iblock; struct folio *folio; bool dirty = false; for (; idx < idx_end; idx += 1, from = 0) { page_off = (loff_t)idx << PAGE_SHIFT; to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) : PAGE_SIZE; iblock = page_off >> inode->i_blkbits; folio = __filemap_get_folio( mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mapping_gfp_constraint(mapping, ~__GFP_FS)); if (IS_ERR(folio)) return PTR_ERR(folio); head = folio_buffers(folio); if (!head) head = create_empty_buffers(folio, blocksize, 0); bh = head; bh_off = 0; do { bh_next = bh_off + blocksize; if (bh_next <= from || bh_off >= to) continue; if (!buffer_mapped(bh)) { ntfs_get_block(inode, iblock, bh, 0); /* Unmapped? It's a hole - nothing to do. */ if (!buffer_mapped(bh)) continue; } /* Ok, it's mapped. Make sure it's up-to-date. */ if (folio_test_uptodate(folio)) set_buffer_uptodate(bh); else if (bh_read(bh, 0) < 0) { err = -EIO; folio_unlock(folio); folio_put(folio); goto out; } mark_buffer_dirty(bh); } while (bh_off = bh_next, iblock += 1, head != (bh = bh->b_this_page)); folio_zero_segment(folio, from, to); dirty = true; folio_unlock(folio); folio_put(folio); cond_resched(); } out: if (dirty) mark_inode_dirty(inode); return err; } /* * ntfs_file_mmap_prepare - file_operations::mmap_prepare */ static int ntfs_file_mmap_prepare(struct vm_area_desc *desc) { struct file *file = desc->file; struct inode *inode = file_inode(file); struct ntfs_inode *ni = ntfs_i(inode); u64 from = ((u64)desc->pgoff << PAGE_SHIFT); bool rw = desc->vm_flags & VM_WRITE; int err; /* Avoid any operation if inode is bad. */ if (unlikely(is_bad_ni(ni))) return -EINVAL; if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; if (is_encrypted(ni)) { ntfs_inode_warn(inode, "mmap encrypted not supported"); return -EOPNOTSUPP; } if (is_dedup(ni)) { ntfs_inode_warn(inode, "mmap deduplicated not supported"); return -EOPNOTSUPP; } if (is_compressed(ni) && rw) { ntfs_inode_warn(inode, "mmap(write) compressed not supported"); return -EOPNOTSUPP; } if (rw) { u64 to = min_t(loff_t, i_size_read(inode), from + desc->end - desc->start); if (is_sparsed(ni)) { /* Allocate clusters for rw map. 
*/ struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; CLST lcn, len; CLST vcn = from >> sbi->cluster_bits; CLST end = bytes_to_cluster(sbi, to); bool new; for (; vcn < end; vcn += len) { err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true); if (err) goto out; } } if (ni->i_valid < to) { if (!inode_trylock(inode)) { err = -EAGAIN; goto out; } err = ntfs_extend_initialized_size(file, ni, ni->i_valid, to); inode_unlock(inode); if (err) goto out; } } err = generic_file_mmap_prepare(desc); out: return err; } static int ntfs_extend(struct inode *inode, loff_t pos, size_t count, struct file *file) { struct ntfs_inode *ni = ntfs_i(inode); struct address_space *mapping = inode->i_mapping; loff_t end = pos + count; bool extend_init = file && pos > ni->i_valid; int err; if (end <= inode->i_size && !extend_init) return 0; /* Mark rw ntfs as dirty. It will be cleared at umount. */ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY); if (end > inode->i_size) { err = ntfs_set_size(inode, end); if (err) goto out; } if (extend_init && !is_compressed(ni)) { err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos); if (err) goto out; } else { err = 0; } if (file && is_sparsed(ni)) { /* * This code optimizes large writes to sparse file. * TODO: merge this fragment with fallocate fragment. */ struct ntfs_sb_info *sbi = ni->mi.sbi; CLST vcn = pos >> sbi->cluster_bits; CLST cend = bytes_to_cluster(sbi, end); CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); CLST lcn, clen; bool new; if (cend_v > cend) cend_v = cend; /* * Allocate and zero new clusters. * Zeroing these clusters may be too long. */ for (; vcn < cend_v; vcn += clen) { err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn, &clen, &new, true); if (err) goto out; } /* * Allocate but not zero new clusters. */ for (; vcn < cend; vcn += clen) { err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, &clen, &new, false); if (err) goto out; } } inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); if (IS_SYNC(inode)) { int err2; err = filemap_fdatawrite_range(mapping, pos, end - 1); err2 = sync_mapping_buffers(mapping); if (!err) err = err2; err2 = write_inode_now(inode, 1); if (!err) err = err2; if (!err) err = filemap_fdatawait_range(mapping, pos, end - 1); } out: return err; } static int ntfs_truncate(struct inode *inode, loff_t new_size) { struct super_block *sb = inode->i_sb; struct ntfs_inode *ni = ntfs_i(inode); int err, dirty = 0; u64 new_valid; if (!S_ISREG(inode->i_mode)) return 0; if (is_compressed(ni)) { if (ni->i_valid > new_size) ni->i_valid = new_size; } else { err = block_truncate_page(inode->i_mapping, new_size, ntfs_get_block); if (err) return err; } new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size)); truncate_setsize(inode, new_size); ni_lock(ni); down_write(&ni->file.run_lock); err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size, &new_valid, ni->mi.sbi->options->prealloc, NULL); up_write(&ni->file.run_lock); if (new_valid < ni->i_valid) ni->i_valid = new_valid; ni_unlock(ni); ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE; inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); if (!IS_DIRSYNC(inode)) { dirty = 1; } else { err = ntfs_sync_inode(inode); if (err) return err; } if (dirty) mark_inode_dirty(inode); /*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/ return 0; } /* * ntfs_fallocate - file_operations::ntfs_fallocate * * Preallocate space for a file. This implements ntfs's fallocate file * operation, which gets called from sys_fallocate system call. 
User * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set * we just allocate clusters without zeroing them out. Otherwise we * allocate and zero out clusters via an expanding truncate. */ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) { struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; struct ntfs_inode *ni = ntfs_i(inode); loff_t end = vbo + len; loff_t vbo_down = round_down(vbo, max_t(unsigned long, sbi->cluster_size, PAGE_SIZE)); bool is_supported_holes = is_sparsed(ni) || is_compressed(ni); loff_t i_size, new_size; bool map_locked; int err; /* No support for dir. */ if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; /* * vfs_fallocate checks all possible combinations of mode. * Do additional checks here before ntfs_set_state(dirty). */ if (mode & FALLOC_FL_PUNCH_HOLE) { if (!is_supported_holes) return -EOPNOTSUPP; } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { } else if (mode & FALLOC_FL_INSERT_RANGE) { if (!is_supported_holes) return -EOPNOTSUPP; } else if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) { ntfs_inode_warn(inode, "fallocate(0x%x) is not supported", mode); return -EOPNOTSUPP; } ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); inode_lock(inode); i_size = inode->i_size; new_size = max(end, i_size); map_locked = false; if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { /* Should never be here, see ntfs_file_open. */ err = -EOPNOTSUPP; goto out; } if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) { inode_dio_wait(inode); filemap_invalidate_lock(mapping); map_locked = true; } if (mode & FALLOC_FL_PUNCH_HOLE) { u32 frame_size; loff_t mask, vbo_a, end_a, tmp; err = filemap_write_and_wait_range(mapping, vbo_down, LLONG_MAX); if (err) goto out; truncate_pagecache(inode, vbo_down); ni_lock(ni); err = attr_punch_hole(ni, vbo, len, &frame_size); ni_unlock(ni); if (!err) goto ok; if (err != E_NTFS_NOTALIGNED) goto out; /* Process not aligned punch. */ err = 0; mask = frame_size - 1; vbo_a = (vbo + mask) & ~mask; end_a = end & ~mask; tmp = min(vbo_a, end); if (tmp > vbo) { err = ntfs_zero_range(inode, vbo, tmp); if (err) goto out; } if (vbo < end_a && end_a < end) { err = ntfs_zero_range(inode, end_a, end); if (err) goto out; } /* Aligned punch_hole */ if (end_a > vbo_a) { ni_lock(ni); err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL); ni_unlock(ni); if (err) goto out; } } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { /* * Write tail of the last page before removed range since * it will get removed from the page cache below. */ err = filemap_write_and_wait_range(mapping, vbo_down, vbo); if (err) goto out; /* * Write data that will be shifted to preserve them * when discarding page cache below. */ err = filemap_write_and_wait_range(mapping, end, LLONG_MAX); if (err) goto out; truncate_pagecache(inode, vbo_down); ni_lock(ni); err = attr_collapse_range(ni, vbo, len); ni_unlock(ni); if (err) goto out; } else if (mode & FALLOC_FL_INSERT_RANGE) { /* Check new size. */ err = inode_newsize_ok(inode, new_size); if (err) goto out; /* Write out all dirty pages. */ err = filemap_write_and_wait_range(mapping, vbo_down, LLONG_MAX); if (err) goto out; truncate_pagecache(inode, vbo_down); ni_lock(ni); err = attr_insert_range(ni, vbo, len); ni_unlock(ni); if (err) goto out; } else { /* Check new size. 
*/ u8 cluster_bits = sbi->cluster_bits; /* Be sure file is non resident. */ if (is_resident(ni)) { ni_lock(ni); err = attr_force_nonresident(ni); ni_unlock(ni); if (err) goto out; } /* generic/213: expected -ENOSPC instead of -EFBIG. */ if (!is_supported_holes) { loff_t to_alloc = new_size - inode_get_bytes(inode); if (to_alloc > 0 && (to_alloc >> cluster_bits) > wnd_zeroes(&sbi->used.bitmap)) { err = -ENOSPC; goto out; } } err = inode_newsize_ok(inode, new_size); if (err) goto out; if (new_size > i_size) { /* * Allocate clusters, do not change 'valid' size. */ err = ntfs_set_size(inode, new_size); if (err) goto out; } if (is_supported_holes) { CLST vcn = vbo >> cluster_bits; CLST cend = bytes_to_cluster(sbi, end); CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); CLST lcn, clen; bool new; if (cend_v > cend) cend_v = cend; /* * Allocate and zero new clusters. * Zeroing these clusters may be too long. */ for (; vcn < cend_v; vcn += clen) { err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn, &clen, &new, true); if (err) goto out; } /* * Allocate but not zero new clusters. */ for (; vcn < cend; vcn += clen) { err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, &clen, &new, false); if (err) goto out; } } if (mode & FALLOC_FL_KEEP_SIZE) { ni_lock(ni); /* True - Keep preallocated. */ err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, i_size, &ni->i_valid, true, NULL); ni_unlock(ni); if (err) goto out; } else if (new_size > i_size) { i_size_write(inode, new_size); } } ok: err = file_modified(file); if (err) goto out; out: if (map_locked) filemap_invalidate_unlock(mapping); if (!err) { inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); } inode_unlock(inode); return err; } /* * ntfs_setattr - inode_operations::setattr */ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct ntfs_inode *ni = ntfs_i(inode); u32 ia_valid = attr->ia_valid; umode_t mode = inode->i_mode; int err; /* Avoid any operation if inode is bad. */ if (unlikely(is_bad_ni(ni))) return -EINVAL; if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; err = setattr_prepare(idmap, dentry, attr); if (err) goto out; if (ia_valid & ATTR_SIZE) { loff_t newsize, oldsize; if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { /* Should never be here, see ntfs_file_open(). */ err = -EOPNOTSUPP; goto out; } inode_dio_wait(inode); oldsize = i_size_read(inode); newsize = attr->ia_size; if (newsize <= oldsize) err = ntfs_truncate(inode, newsize); else err = ntfs_extend(inode, newsize, 0, NULL); if (err) goto out; ni->ni_flags |= NI_FLAG_UPDATE_PARENT; i_size_write(inode, newsize); } setattr_copy(idmap, inode, attr); if (mode != inode->i_mode) { err = ntfs_acl_chmod(idmap, dentry); if (err) goto out; /* Linux 'w' -> Windows 'ro'. */ if (0222 & inode->i_mode) ni->std_fa &= ~FILE_ATTRIBUTE_READONLY; else ni->std_fa |= FILE_ATTRIBUTE_READONLY; } if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE)) ntfs_save_wsl_perm(inode, NULL); mark_inode_dirty(inode); out: return err; } /* * check_read_restriction: * common code for ntfs_file_read_iter and ntfs_file_splice_read */ static int check_read_restriction(struct inode *inode) { struct ntfs_inode *ni = ntfs_i(inode); /* Avoid any operation if inode is bad. 
*/ if (unlikely(is_bad_ni(ni))) return -EINVAL; if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; if (is_encrypted(ni)) { ntfs_inode_warn(inode, "encrypted i/o not supported"); return -EOPNOTSUPP; } #ifndef CONFIG_NTFS3_LZX_XPRESS if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) { ntfs_inode_warn( inode, "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files"); return -EOPNOTSUPP; } #endif if (is_dedup(ni)) { ntfs_inode_warn(inode, "read deduplicated not supported"); return -EOPNOTSUPP; } return 0; } /* * ntfs_file_read_iter - file_operations::read_iter */ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ntfs_inode *ni = ntfs_i(inode); ssize_t err; err = check_read_restriction(inode); if (err) return err; if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) { ntfs_inode_warn(inode, "direct i/o + compressed not supported"); return -EOPNOTSUPP; } return generic_file_read_iter(iocb, iter); } /* * ntfs_file_splice_read - file_operations::splice_read */ static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); ssize_t err; err = check_read_restriction(inode); if (err) return err; return filemap_splice_read(in, ppos, pipe, len, flags); } /* * ntfs_get_frame_pages * * Return: Array of locked pages. */ static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index, struct page **pages, u32 pages_per_frame, bool *frame_uptodate) { gfp_t gfp_mask = mapping_gfp_mask(mapping); u32 npages; *frame_uptodate = true; for (npages = 0; npages < pages_per_frame; npages++, index++) { struct folio *folio; folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask); if (IS_ERR(folio)) { while (npages--) { folio = page_folio(pages[npages]); folio_unlock(folio); folio_put(folio); } return -ENOMEM; } if (!folio_test_uptodate(folio)) *frame_uptodate = false; pages[npages] = &folio->page; } return 0; } /* * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files). */ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) { int err; struct file *file = iocb->ki_filp; size_t count = iov_iter_count(from); loff_t pos = iocb->ki_pos; struct inode *inode = file_inode(file); loff_t i_size = i_size_read(inode); struct address_space *mapping = inode->i_mapping; struct ntfs_inode *ni = ntfs_i(inode); u64 valid = ni->i_valid; struct ntfs_sb_info *sbi = ni->mi.sbi; struct page **pages = NULL; struct folio *folio; size_t written = 0; u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; u32 frame_size = 1u << frame_bits; u32 pages_per_frame = frame_size >> PAGE_SHIFT; u32 ip, off; CLST frame; u64 frame_vbo; pgoff_t index; bool frame_uptodate; if (frame_size < PAGE_SIZE) { /* * frame_size == 8K if cluster 512 * frame_size == 64K if cluster 4096 */ ntfs_inode_warn(inode, "page size is bigger than frame size"); return -EOPNOTSUPP; } pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS); if (!pages) return -ENOMEM; err = file_remove_privs(file); if (err) goto out; err = file_update_time(file); if (err) goto out; /* Zero range [valid : pos). 
*/ while (valid < pos) { CLST lcn, clen; frame = valid >> frame_bits; frame_vbo = valid & ~(frame_size - 1); off = valid & (frame_size - 1); err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn, &clen, NULL, false); if (err) goto out; if (lcn == SPARSE_LCN) { ni->i_valid = valid = frame_vbo + ((u64)clen << sbi->cluster_bits); continue; } /* Load full frame. */ err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT, pages, pages_per_frame, &frame_uptodate); if (err) goto out; if (!frame_uptodate && off) { err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame); if (err) { for (ip = 0; ip < pages_per_frame; ip++) { folio = page_folio(pages[ip]); folio_unlock(folio); folio_put(folio); } goto out; } } ip = off >> PAGE_SHIFT; off = offset_in_page(valid); for (; ip < pages_per_frame; ip++, off = 0) { folio = page_folio(pages[ip]); folio_zero_segment(folio, off, PAGE_SIZE); flush_dcache_folio(folio); folio_mark_uptodate(folio); } ni_lock(ni); err = ni_write_frame(ni, pages, pages_per_frame); ni_unlock(ni); for (ip = 0; ip < pages_per_frame; ip++) { folio = page_folio(pages[ip]); folio_mark_uptodate(folio); folio_unlock(folio); folio_put(folio); } if (err) goto out; ni->i_valid = valid = frame_vbo + frame_size; } /* Copy user data [pos : pos + count). */ while (count) { size_t copied, bytes; off = pos & (frame_size - 1); bytes = frame_size - off; if (bytes > count) bytes = count; frame_vbo = pos & ~(frame_size - 1); index = frame_vbo >> PAGE_SHIFT; if (unlikely(fault_in_iov_iter_readable(from, bytes))) { err = -EFAULT; goto out; } /* Load full frame. */ err = ntfs_get_frame_pages(mapping, index, pages, pages_per_frame, &frame_uptodate); if (err) goto out; if (!frame_uptodate) { loff_t to = pos + bytes; if (off || (to < i_size && (to & (frame_size - 1)))) { err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame); if (err) { for (ip = 0; ip < pages_per_frame; ip++) { folio = page_folio(pages[ip]); folio_unlock(folio); folio_put(folio); } goto out; } } } WARN_ON(!bytes); copied = 0; ip = off >> PAGE_SHIFT; off = offset_in_page(pos); /* Copy user data to pages. */ for (;;) { size_t cp, tail = PAGE_SIZE - off; folio = page_folio(pages[ip]); cp = copy_folio_from_iter_atomic(folio, off, min(tail, bytes), from); flush_dcache_folio(folio); copied += cp; bytes -= cp; if (!bytes || !cp) break; if (cp < tail) { off += cp; } else { ip++; off = 0; } } ni_lock(ni); err = ni_write_frame(ni, pages, pages_per_frame); ni_unlock(ni); for (ip = 0; ip < pages_per_frame; ip++) { folio = page_folio(pages[ip]); folio_clear_dirty(folio); folio_mark_uptodate(folio); folio_unlock(folio); folio_put(folio); } if (err) goto out; /* * We can loop for a long time in here. Be nice and allow * us to schedule out to avoid softlocking if preempt * is disabled. */ cond_resched(); pos += copied; written += copied; count = iov_iter_count(from); } out: kfree(pages); if (err < 0) return err; iocb->ki_pos += written; if (iocb->ki_pos > ni->i_valid) ni->i_valid = iocb->ki_pos; if (iocb->ki_pos > i_size) i_size_write(inode, iocb->ki_pos); return written; } /* * check_write_restriction: * common code for ntfs_file_write_iter and ntfs_file_splice_write */ static int check_write_restriction(struct inode *inode) { struct ntfs_inode *ni = ntfs_i(inode); /* Avoid any operation if inode is bad. 
*/ if (unlikely(is_bad_ni(ni))) return -EINVAL; if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; if (is_encrypted(ni)) { ntfs_inode_warn(inode, "encrypted i/o not supported"); return -EOPNOTSUPP; } if (is_dedup(ni)) { ntfs_inode_warn(inode, "write into deduplicated not supported"); return -EOPNOTSUPP; } return 0; } /* * ntfs_file_write_iter - file_operations::write_iter */ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ntfs_inode *ni = ntfs_i(inode); ssize_t ret; int err; if (!inode_trylock(inode)) { if (iocb->ki_flags & IOCB_NOWAIT) return -EAGAIN; inode_lock(inode); } ret = check_write_restriction(inode); if (ret) goto out; if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) { ntfs_inode_warn(inode, "direct i/o + compressed not supported"); ret = -EOPNOTSUPP; goto out; } ret = generic_write_checks(iocb, from); if (ret <= 0) goto out; err = file_modified(iocb->ki_filp); if (err) { ret = err; goto out; } if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) { /* Should never be here, see ntfs_file_open(). */ ret = -EOPNOTSUPP; goto out; } ret = ntfs_extend(inode, iocb->ki_pos, ret, file); if (ret) goto out; ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) : __generic_file_write_iter(iocb, from); out: inode_unlock(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } /* * ntfs_file_open - file_operations::open */ int ntfs_file_open(struct inode *inode, struct file *file) { struct ntfs_inode *ni = ntfs_i(inode); /* Avoid any operation if inode is bad. */ if (unlikely(is_bad_ni(ni))) return -EINVAL; if (unlikely(ntfs3_forced_shutdown(inode->i_sb))) return -EIO; if (unlikely((is_compressed(ni) || is_encrypted(ni)) && (file->f_flags & O_DIRECT))) { return -EOPNOTSUPP; } /* Decompress "external compressed" file if opened for rw. */ if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) && (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) { #ifdef CONFIG_NTFS3_LZX_XPRESS int err = ni_decompress_file(ni); if (err) return err; #else ntfs_inode_warn( inode, "activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files"); return -EOPNOTSUPP; #endif } return generic_file_open(inode, file); } /* * ntfs_file_release - file_operations::release */ static int ntfs_file_release(struct inode *inode, struct file *file) { struct ntfs_inode *ni = ntfs_i(inode); struct ntfs_sb_info *sbi = ni->mi.sbi; int err = 0; /* If we are last writer on the inode, drop the block reservation. */ if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) && atomic_read(&inode->i_writecount) == 1) /* * The only file when inode->i_fop = &ntfs_file_operations and * init_rwsem(&ni->file.run_lock) is not called explicitly is MFT. * * Add additional check here. */ && inode->i_ino != MFT_REC_MFT) { ni_lock(ni); down_write(&ni->file.run_lock); err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, i_size_read(inode), &ni->i_valid, false, NULL); up_write(&ni->file.run_lock); ni_unlock(ni); } return err; } /* * ntfs_fiemap - inode_operations::fiemap */ int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { int err; struct ntfs_inode *ni = ntfs_i(inode); /* Avoid any operation if inode is bad. 
*/ if (unlikely(is_bad_ni(ni))) return -EINVAL; err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR); if (err) return err; ni_lock(ni); err = ni_fiemap(ni, fieinfo, start, len); ni_unlock(ni); return err; } /* * ntfs_file_splice_write - file_operations::splice_write */ static ssize_t ntfs_file_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos, size_t len, unsigned int flags) { ssize_t err; struct inode *inode = file_inode(file); err = check_write_restriction(inode); if (err) return err; return iter_file_splice_write(pipe, file, ppos, len, flags); } // clang-format off const struct inode_operations ntfs_file_inode_operations = { .getattr = ntfs_getattr, .setattr = ntfs_setattr, .listxattr = ntfs_listxattr, .get_acl = ntfs_get_acl, .set_acl = ntfs_set_acl, .fiemap = ntfs_fiemap, }; const struct file_operations ntfs_file_operations = { .llseek = generic_file_llseek, .read_iter = ntfs_file_read_iter, .write_iter = ntfs_file_write_iter, .unlocked_ioctl = ntfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ntfs_compat_ioctl, #endif .splice_read = ntfs_file_splice_read, .splice_write = ntfs_file_splice_write, .mmap_prepare = ntfs_file_mmap_prepare, .open = ntfs_file_open, .fsync = generic_file_fsync, .fallocate = ntfs_fallocate, .release = ntfs_file_release, }; #if IS_ENABLED(CONFIG_NTFS_FS) const struct file_operations ntfs_legacy_file_operations = { .llseek = generic_file_llseek, .read_iter = ntfs_file_read_iter, .splice_read = ntfs_file_splice_read, .open = ntfs_file_open, .release = ntfs_file_release, }; #endif // clang-format on |
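The compressed write path above relies entirely on the frame arithmetic (frame index, frame start offset, and offset within the frame) derived from frame_bits and frame_size. The standalone sketch below reproduces just that arithmetic in userspace so the masking can be checked by hand; the 4 KiB cluster size and 16-cluster LZNT frame are illustrative assumptions, not values taken from this listing.

#include <inttypes.h>
#include <stdio.h>

/*
 * Standalone illustration of the frame arithmetic used by the compressed
 * write path. The constants are assumptions chosen for the example:
 * 4 KiB clusters and a 16-cluster LZNT compression unit. The kernel
 * derives the real values from sbi->cluster_bits and NTFS_LZNT_CUNIT.
 */
int main(void)
{
	const unsigned int cluster_bits = 12;              /* 4 KiB clusters (assumed) */
	const unsigned int frame_bits = cluster_bits + 4;  /* 16 clusters per frame (assumed) */
	const uint64_t frame_size = 1ull << frame_bits;    /* 64 KiB per frame */
	const uint64_t pos = 200000;                       /* arbitrary file position */

	uint64_t frame = pos >> frame_bits;                /* compression frame index */
	uint64_t frame_vbo = pos & ~(frame_size - 1);      /* byte offset of frame start */
	uint64_t off = pos & (frame_size - 1);             /* offset inside the frame */

	printf("pos=%" PRIu64 " frame=%" PRIu64 " frame_vbo=%" PRIu64 " off=%" PRIu64 "\n",
	       pos, frame, frame_vbo, off);
	return 0;
}

With these values the program prints frame=3, frame_vbo=196608 and off=3392, i.e. pos = 3 * 65536 + 3392; in the kernel code the whole frame covering those frame_size bytes is then loaded and written back through the pages[] array.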
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_PGTABLE_H #define _LINUX_PGTABLE_H #include <linux/pfn.h> #include <asm/pgtable.h> #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT) #define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT) #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU #include <linux/mm_types.h> #include <linux/bug.h> #include <linux/errno.h> #include <asm-generic/pgtable_uffd.h> #include <linux/page_table_check.h> #if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS #error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED #endif /* * On almost all architectures and configurations, 0 can be used as the * upper ceiling to free_pgtables(): on many architectures it has the same * effect as using TASK_SIZE. However, there is one configuration which * must impose a more careful limit, to avoid freeing kernel pgtables.
*/ #ifndef USER_PGTABLES_CEILING #define USER_PGTABLES_CEILING 0UL #endif /* * This defines the first usable user address. Platforms * can override its value with custom FIRST_USER_ADDRESS * defined in their respective <asm/pgtable.h>. */ #ifndef FIRST_USER_ADDRESS #define FIRST_USER_ADDRESS 0UL #endif /* * This defines the generic helper for accessing PMD page * table page. Although platforms can still override this * via their respective <asm/pgtable.h>. */ #ifndef pmd_pgtable #define pmd_pgtable(pmd) pmd_page(pmd) #endif #define pmd_folio(pmd) page_folio(pmd_page(pmd)) /* * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] * * The pXx_index() functions return the index of the entry in the page * table page which would control the given virtual address * * As these functions may be used by the same code for different levels of * the page table folding, they are always available, regardless of * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0 * because in such cases PTRS_PER_PxD equals 1. */ static inline unsigned long pte_index(unsigned long address) { return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); } #ifndef pmd_index static inline unsigned long pmd_index(unsigned long address) { return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); } #define pmd_index pmd_index #endif #ifndef pud_index static inline unsigned long pud_index(unsigned long address) { return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); } #define pud_index pud_index #endif #ifndef pgd_index /* Must be a compile-time constant, so implement it as a macro */ #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #endif #ifndef kernel_pte_init static inline void kernel_pte_init(void *addr) { } #define kernel_pte_init kernel_pte_init #endif #ifndef pmd_init static inline void pmd_init(void *addr) { } #define pmd_init pmd_init #endif #ifndef pud_init static inline void pud_init(void *addr) { } #define pud_init pud_init #endif #ifndef pte_offset_kernel static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) { return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); } #define pte_offset_kernel pte_offset_kernel #endif #ifdef CONFIG_HIGHPTE #define __pte_map(pmd, address) \ ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address))) #define pte_unmap(pte) do { \ kunmap_local((pte)); \ rcu_read_unlock(); \ } while (0) #else static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) { return pte_offset_kernel(pmd, address); } static inline void pte_unmap(pte_t *pte) { rcu_read_unlock(); } #endif void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable); /* Find an entry in the second-level page table.. 
*/ #ifndef pmd_offset static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) { return pud_pgtable(*pud) + pmd_index(address); } #define pmd_offset pmd_offset #endif #ifndef pud_offset static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { return p4d_pgtable(*p4d) + pud_index(address); } #define pud_offset pud_offset #endif static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) { return (pgd + pgd_index(address)); }; /* * a shortcut to get a pgd_t in a given mm */ #ifndef pgd_offset #define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) #endif /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) /* * In many cases it is known that a virtual address is mapped at PMD or PTE * level, so instead of traversing all the page table levels, we can get a * pointer to the PMD entry in user or kernel page table or translate a virtual * address to the pointer in the PTE in the kernel page tables with simple * helpers. */ static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va) { return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va); } static inline pmd_t *pmd_off_k(unsigned long va) { return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va); } static inline pte_t *virt_to_kpte(unsigned long vaddr) { pmd_t *pmd = pmd_off_k(vaddr); return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr); } #ifndef pmd_young static inline int pmd_young(pmd_t pmd) { return 0; } #endif #ifndef pmd_dirty static inline int pmd_dirty(pmd_t pmd) { return 0; } #endif /* * A facility to provide lazy MMU batching. This allows PTE updates and * page invalidations to be delayed until a call to leave lazy MMU mode * is issued. Some architectures may benefit from doing this, and it is * beneficial for both shadow and direct mode hypervisors, which may batch * the PTE updates which happen during this window. Note that using this * interface requires that read hazards be removed from the code. A read * hazard could result in the direct mode hypervisor case, since the actual * write to the page tables may not yet have taken place, so reads though * a raw PTE pointer after it has been modified are not guaranteed to be * up to date. * * In the general case, no lock is guaranteed to be held between entry and exit * of the lazy mode. So the implementation must assume preemption may be enabled * and cpu migration is possible; it must take steps to be robust against this. * (In practice, for user PTE updates, the appropriate page table lock(s) are * held, but for kernel PTE updates, no lock is held). Nesting is not permitted * and the mode cannot be used in interrupt context. */ #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE #define arch_enter_lazy_mmu_mode() do {} while (0) #define arch_leave_lazy_mmu_mode() do {} while (0) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif #ifndef pte_batch_hint /** * pte_batch_hint - Number of pages that can be added to batch without scanning. * @ptep: Page table pointer for the entry. * @pte: Page table entry. * * Some architectures know that a set of contiguous ptes all map the same * contiguous memory with the same permissions. In this case, it can provide a * hint to aid pte batching without the core code needing to scan every pte. * * An architecture implementation may ignore the PTE accessed state. Further, * the dirty state must apply atomically to all the PTEs described by the hint. 
* * May be overridden by the architecture, else pte_batch_hint is always 1. */ static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) { return 1; } #endif #ifndef pte_advance_pfn static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); } #endif #define pte_next_pfn(pte) pte_advance_pfn(pte, 1) #ifndef set_ptes /** * set_ptes - Map consecutive pages to a contiguous range of addresses. * @mm: Address space to map the pages into. * @addr: Address to map the first page at. * @ptep: Page table pointer for the first entry. * @pte: Page table entry for the first page. * @nr: Number of pages to map. * * When nr==1, initial state of pte may be present or not present, and new state * may be present or not present. When nr>1, initial state of all ptes must be * not present, and new state must be present. * * May be overridden by the architecture, or the architecture can define * set_pte() and PFN_PTE_SHIFT. * * Context: The caller holds the page table lock. The pages all belong * to the same folio. The PTEs are all in the same PMD. */ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { page_table_check_ptes_set(mm, ptep, pte, nr); for (;;) { set_pte(ptep, pte); if (--nr == 0) break; ptep++; pte = pte_next_pfn(pte); } } #endif #define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1) #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); #endif #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty); extern int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty); #else static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { BUILD_BUG(); return 0; } static inline int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, pud_t entry, int dirty) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef ptep_get static inline pte_t ptep_get(pte_t *ptep) { return READ_ONCE(*ptep); } #endif #ifndef pmdp_get static inline pmd_t pmdp_get(pmd_t *pmdp) { return READ_ONCE(*pmdp); } #endif #ifndef pudp_get static inline pud_t pudp_get(pud_t *pudp) { return READ_ONCE(*pudp); } #endif #ifndef p4dp_get static inline p4d_t p4dp_get(p4d_t *p4dp) { return READ_ONCE(*p4dp); } #endif #ifndef pgdp_get static inline pgd_t pgdp_get(pgd_t *pgdp) { return READ_ONCE(*pgdp); } #endif #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = ptep_get(ptep); int r = 1; if (!pte_young(pte)) r = 0; else set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); return r; } #endif #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; int r = 1; if (!pmd_young(pmd)) r = 0; else set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); return r; } #else static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t 
*pmdp) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */ #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #else /* * Despite relevant to THP only, this API is called from generic rmap code * under PageTransHuge(), hence needs a dummy implementation for !THP */ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef arch_has_hw_nonleaf_pmd_young /* * Return whether the accessed bit in non-leaf PMD entries is supported on the * local CPU. */ static inline bool arch_has_hw_nonleaf_pmd_young(void) { return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG); } #endif #ifndef arch_has_hw_pte_young /* * Return whether the accessed bit is supported on the local CPU. * * This stub assumes accessing through an old PTE triggers a page fault. * Architectures that automatically set the access bit should overwrite it. */ static inline bool arch_has_hw_pte_young(void) { return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG); } #endif #ifndef exec_folio_order /* * Returns preferred minimum folio order for executable file-backed memory. Must * be in range [0, PMD_ORDER). Default to order-0. */ static inline unsigned int exec_folio_order(void) { return 0; } #endif #ifndef arch_check_zapped_pte static inline void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte) { } #endif #ifndef arch_check_zapped_pmd static inline void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd) { } #endif #ifndef arch_check_zapped_pud static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud) { } #endif #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = ptep_get(ptep); pte_clear(mm, address, ptep); page_table_check_pte_clear(mm, pte); return pte; } #endif #ifndef clear_young_dirty_ptes /** * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the * same folio as old/clean. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to mark old/clean. * @flags: Flags to modify the PTE batch semantics. * * May be overridden by the architecture; otherwise, implemented by * get_and_clear/modify/set for each pte in the range. * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. 
*/ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned int nr, cydp_t flags) { pte_t pte; for (;;) { if (flags == CYDP_CLEAR_YOUNG) ptep_test_and_clear_young(vma, addr, ptep); else { pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); if (flags & CYDP_CLEAR_YOUNG) pte = pte_mkold(pte); if (flags & CYDP_CLEAR_DIRTY) pte = pte_mkclean(pte); set_pte_at(vma->vm_mm, addr, ptep, pte); } if (--nr == 0) break; ptep++; addr += PAGE_SIZE; } } #endif static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t pte = ptep_get(ptep); pte_clear(mm, addr, ptep); /* * No need for ptep_get_and_clear(): page table check doesn't care about * any bits that could have been set by HW concurrently. */ page_table_check_pte_clear(mm, pte); } #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH /* * For walking the pagetables without holding any locks. Some architectures * (eg x86-32 PAE) cannot load the entries atomically without using expensive * instructions. We are guaranteed that a PTE will only either go from not * present to present, or present to not present -- it will not switch to a * completely different present page without a TLB flush inbetween; which we * are blocking by holding interrupts off. * * Setting ptes from not present to present goes: * * ptep->pte_high = h; * smp_wmb(); * ptep->pte_low = l; * * And present to not present goes: * * ptep->pte_low = 0; * smp_wmb(); * ptep->pte_high = 0; * * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'. * We load pte_high *after* loading pte_low, which ensures we don't see an older * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't * picked up a changed pte high. We might have gotten rubbish values from * pte_low and pte_high, but we are guaranteed that pte_low will not have the * present bit set *unless* it is 'l'. Because get_user_pages_fast() only * operates on present ptes we're safe. */ static inline pte_t ptep_get_lockless(pte_t *ptep) { pte_t pte; do { pte.pte_low = ptep->pte_low; smp_rmb(); pte.pte_high = ptep->pte_high; smp_rmb(); } while (unlikely(pte.pte_low != ptep->pte_low)); return pte; } #define ptep_get_lockless ptep_get_lockless #if CONFIG_PGTABLE_LEVELS > 2 static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) { pmd_t pmd; do { pmd.pmd_low = pmdp->pmd_low; smp_rmb(); pmd.pmd_high = pmdp->pmd_high; smp_rmb(); } while (unlikely(pmd.pmd_low != pmdp->pmd_low)); return pmd; } #define pmdp_get_lockless pmdp_get_lockless #define pmdp_get_lockless_sync() tlb_remove_table_sync_one() #endif /* CONFIG_PGTABLE_LEVELS > 2 */ #endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */ /* * We require that the PTE can be read atomically. 
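 * On most 64-bit configurations a pte_t is a single word, so the generic
 * fallback below can simply use ptep_get() (a READ_ONCE() of the entry);
 * the CONFIG_GUP_GET_PXX_LOW_HIGH helpers above cover the split
 * low/high case such as x86-32 PAE.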
*/ #ifndef ptep_get_lockless static inline pte_t ptep_get_lockless(pte_t *ptep) { return ptep_get(ptep); } #endif #ifndef pmdp_get_lockless static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) { return pmdp_get(pmdp); } static inline void pmdp_get_lockless_sync(void) { } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t pmd = *pmdp; pmd_clear(pmdp); page_table_check_pmd_clear(mm, pmd); return pmd; } #endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pud_t *pudp) { pud_t pud = *pudp; pud_clear(pudp); page_table_check_pud_clear(mm, pud); return pud; } #endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, int full) { return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); } #endif #ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, int full) { return pudp_huge_get_and_clear(vma->vm_mm, address, pudp); } #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { return ptep_get_and_clear(mm, address, ptep); } #endif #ifndef get_and_clear_full_ptes /** * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of * the same folio, collecting dirty/accessed bits. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to clear. * @full: Whether we are clearing a full mm. * * May be overridden by the architecture; otherwise, implemented as a simple * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the * returned PTE. * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. */ static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr, int full) { pte_t pte, tmp_pte; pte = ptep_get_and_clear_full(mm, addr, ptep, full); while (--nr) { ptep++; addr += PAGE_SIZE; tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full); if (pte_dirty(tmp_pte)) pte = pte_mkdirty(pte); if (pte_young(tmp_pte)) pte = pte_mkyoung(pte); } return pte; } #endif /** * get_and_clear_ptes - Clear present PTEs that map consecutive pages of * the same folio, collecting dirty/accessed bits. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to clear. * * Use this instead of get_and_clear_full_ptes() if it is known that we don't * need to clear the full mm, which is mostly the case. * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. 
The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. */ static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr) { return get_and_clear_full_ptes(mm, addr, ptep, nr, 0); } #ifndef clear_full_ptes /** * clear_full_ptes - Clear present PTEs that map consecutive pages of the same * folio. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to clear. * @full: Whether we are clearing a full mm. * * May be overridden by the architecture; otherwise, implemented as a simple * loop over ptep_get_and_clear_full(). * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. */ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr, int full) { for (;;) { ptep_get_and_clear_full(mm, addr, ptep, full); if (--nr == 0) break; ptep++; addr += PAGE_SIZE; } } #endif /** * clear_ptes - Clear present PTEs that map consecutive pages of the same folio. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to clear. * * Use this instead of clear_full_ptes() if it is known that we don't need to * clear the full mm, which is mostly the case. * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. */ static inline void clear_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr) { clear_full_ptes(mm, addr, ptep, nr, 0); } /* * If two threads concurrently fault at the same page, the thread that * won the race updates the PTE and its local TLB/Cache. The other thread * gives up, simply does nothing, and continues; on architectures where * software can update TLB, local TLB can be updated here to avoid next page * fault. This function updates TLB only, do nothing with cache or others. * It is the difference with function update_mmu_cache. */ #ifndef update_mmu_tlb_range static inline void update_mmu_tlb_range(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { } #endif static inline void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { update_mmu_tlb_range(vma, address, ptep, 1); } /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already * not present, or in the process of an address space destruction. */ #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL static inline void pte_clear_not_present_full(struct mm_struct *mm, unsigned long address, pte_t *ptep, int full) { pte_clear(mm, address, ptep); } #endif #ifndef clear_not_present_full_ptes /** * clear_not_present_full_ptes - Clear multiple not present PTEs which are * consecutive in the pgtable. * @mm: Address space the ptes represent. * @addr: Address of the first pte. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to clear. * @full: Whether we are clearing a full mm. 
* * May be overridden by the architecture; otherwise, implemented as a simple * loop over pte_clear_not_present_full(). * * Context: The caller holds the page table lock. The PTEs are all not present. * The PTEs are all in the same PMD. */ static inline void clear_not_present_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr, int full) { for (;;) { pte_clear_not_present_full(mm, addr, ptep, full); if (--nr == 0) break; ptep++; addr += PAGE_SIZE; } } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #endif #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pud_t *pudp); #endif #ifndef pte_mkwrite static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma) { return pte_mkwrite_novma(pte); } #endif #if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite) static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { return pmd_mkwrite_novma(pmd); } #endif #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT struct mm_struct; static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t old_pte = ptep_get(ptep); set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif #ifndef wrprotect_ptes /** * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same * folio. * @mm: Address space the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries to write-protect. * * May be overridden by the architecture; otherwise, implemented as a simple * loop over ptep_set_wrprotect(). * * Note that PTE bits in the PTE range besides the PFN can differ. For example, * some PTEs might be write-protected. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. The PTEs are all in the same PMD. */ static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr) { for (;;) { ptep_set_wrprotect(mm, addr, ptep); if (--nr == 0) break; ptep++; addr += PAGE_SIZE; } } #endif /* * On some architectures hardware does not set page access bit when accessing * memory page, it is responsibility of software setting this bit. It brings * out extra page fault penalty to track page access bit. For optimization page * access bit can be set during all page fault flow on these arches. * To be differentiate with macro pte_mkyoung, this macro is used on platforms * where software maintains page access bit. 
*/ #ifndef pte_sw_mkyoung static inline pte_t pte_sw_mkyoung(pte_t pte) { return pte; } #define pte_sw_mkyoung pte_sw_mkyoung #endif #ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { pmd_t old_pmd = *pmdp; set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); } #else static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long address, pud_t *pudp) { pud_t old_pud = *pudp; set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); } #else static inline void pudp_set_wrprotect(struct mm_struct *mm, unsigned long address, pud_t *pudp) { BUILD_BUG(); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ #endif #ifndef pmdp_collapse_flush #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #else static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { BUILD_BUG(); return *pmdp; } #define pmdp_collapse_flush pmdp_collapse_flush #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable); #endif #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif #ifndef arch_needs_pgtable_deposit #define arch_needs_pgtable_deposit() (false) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is an implementation of pmdp_establish() that is only suitable for an * architecture that doesn't have hardware dirty/accessed bits. In this case we * can't race with CPU which sets these bits and non-atomic approach is fine. */ static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd) { pmd_t old_pmd = *pmdp; set_pmd_at(vma->vm_mm, address, pmdp, pmd); return old_pmd; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD /* * pmdp_invalidate_ad() invalidates the PMD while changing a transparent * hugepage mapping in the page tables. This function is similar to * pmdp_invalidate(), but should only be used if the access and dirty bits would * not be cleared by the software in the new PMD value. The function ensures * that hardware changes of the access and dirty bits updates would not be lost. * * Doing so can allow in certain architectures to avoid a TLB flush in most * cases. Yet, another TLB flush might be necessary later if the PMD update * itself requires such flush (e.g., if protection was set to be stricter). Yet, * even when a TLB flush is needed because of the update, the caller may be able * to batch these TLB flushing operations, so fewer TLB flush operations are * needed. 
*/ extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp); #endif #ifndef __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { return pte_val(pte_a) == pte_val(pte_b); } #endif #ifndef __HAVE_ARCH_PTE_UNUSED /* * Some architectures provide facilities to virtualization guests * so that they can flag allocated pages as unused. This allows the * host to transparently reclaim unused pages. This function returns * whether the pte's page is unused. */ static inline int pte_unused(pte_t pte) { return 0; } #endif #ifndef pte_access_permitted #define pte_access_permitted(pte, write) \ (pte_present(pte) && (!(write) || pte_write(pte))) #endif #ifndef pmd_access_permitted #define pmd_access_permitted(pmd, write) \ (pmd_present(pmd) && (!(write) || pmd_write(pmd))) #endif #ifndef pud_access_permitted #define pud_access_permitted(pud, write) \ (pud_present(pud) && (!(write) || pud_write(pud))) #endif #ifndef p4d_access_permitted #define p4d_access_permitted(p4d, write) \ (p4d_present(p4d) && (!(write) || p4d_write(p4d))) #endif #ifndef pgd_access_permitted #define pgd_access_permitted(pgd, write) \ (pgd_present(pgd) && (!(write) || pgd_write(pgd))) #endif #ifndef __HAVE_ARCH_PMD_SAME static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) { return pmd_val(pmd_a) == pmd_val(pmd_b); } #endif #ifndef pud_same static inline int pud_same(pud_t pud_a, pud_t pud_b) { return pud_val(pud_a) == pud_val(pud_b); } #define pud_same pud_same #endif #ifndef __HAVE_ARCH_P4D_SAME static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) { return p4d_val(p4d_a) == p4d_val(p4d_b); } #endif #ifndef __HAVE_ARCH_PGD_SAME static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) { return pgd_val(pgd_a) == pgd_val(pgd_b); } #endif #ifndef __HAVE_ARCH_DO_SWAP_PAGE static inline void arch_do_swap_page_nr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t oldpte, int nr) { } #else /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be * restored when the page is swapped back in. SPARC M7 and newer * processors support an ADI (Application Data Integrity) tag for the * page as metadata for the page. arch_do_swap_page() can restore this * metadata when a page is swapped back in. */ static inline void arch_do_swap_page_nr(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t oldpte, int nr) { for (int i = 0; i < nr; i++) { arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, pte_advance_pfn(pte, i), pte_advance_pfn(oldpte, i)); } } #endif #ifndef __HAVE_ARCH_UNMAP_ONE /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be * restored when the page is swapped back in. SPARC M7 and newer * processors support an ADI (Application Data Integrity) tag for the * page as metadata for the page. arch_unmap_one() can save this * metadata on a swap-out of a page. */ static inline int arch_unmap_one(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t orig_pte) { return 0; } #endif /* * Allow architectures to preserve additional metadata associated with * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function * prototypes must be defined in the arch-specific asm/pgtable.h file. 
*/ #ifndef __HAVE_ARCH_PREPARE_TO_SWAP static inline int arch_prepare_to_swap(struct folio *folio) { return 0; } #endif #ifndef __HAVE_ARCH_SWAP_INVALIDATE static inline void arch_swap_invalidate_page(int type, pgoff_t offset) { } static inline void arch_swap_invalidate_area(int type) { } #endif #ifndef __HAVE_ARCH_SWAP_RESTORE static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) { } #endif #ifndef __HAVE_ARCH_MOVE_PTE #define move_pte(pte, old_addr, new_addr) (pte) #endif #ifndef pte_accessible # define pte_accessible(mm, pte) ((void)(pte), 1) #endif #ifndef flush_tlb_fix_spurious_fault #define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address) #endif /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout. */ #define pgd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #ifndef p4d_addr_end #define p4d_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pud_addr_end #define pud_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif #ifndef pmd_addr_end #define pmd_addr_end(addr, end) \ ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ (__boundary - 1 < (end) - 1)? __boundary: (end); \ }) #endif /* * When walking page tables, we usually want to skip any p?d_none entries; * and any p?d_bad entries - reporting the error before resetting to none. * Do the tests inline, but report and clear the bad entry in mm/memory.c. */ void pgd_clear_bad(pgd_t *); #ifndef __PAGETABLE_P4D_FOLDED void p4d_clear_bad(p4d_t *); #else #define p4d_clear_bad(p4d) do { } while (0) #endif #ifndef __PAGETABLE_PUD_FOLDED void pud_clear_bad(pud_t *); #else #define pud_clear_bad(p4d) do { } while (0) #endif void pmd_clear_bad(pmd_t *); static inline int pgd_none_or_clear_bad(pgd_t *pgd) { if (pgd_none(*pgd)) return 1; if (unlikely(pgd_bad(*pgd))) { pgd_clear_bad(pgd); return 1; } return 0; } static inline int p4d_none_or_clear_bad(p4d_t *p4d) { if (p4d_none(*p4d)) return 1; if (unlikely(p4d_bad(*p4d))) { p4d_clear_bad(p4d); return 1; } return 0; } static inline int pud_none_or_clear_bad(pud_t *pud) { if (pud_none(*pud)) return 1; if (unlikely(pud_bad(*pud))) { pud_clear_bad(pud); return 1; } return 0; } static inline int pmd_none_or_clear_bad(pmd_t *pmd) { if (pmd_none(*pmd)) return 1; if (unlikely(pmd_bad(*pmd))) { pmd_clear_bad(pmd); return 1; } return 0; } static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { /* * Get the current pte state, but zero it out to make it * non-present, preventing the hardware from asynchronously * updating it. */ return ptep_get_and_clear(vma->vm_mm, addr, ptep); } static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte) { /* * The pte is non-present, so there's no hardware state to * preserve. */ set_pte_at(vma->vm_mm, addr, ptep, pte); } #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION /* * Start a pte protection read-modify-write transaction, which * protects against asynchronous hardware modifications to the pte. 
* The intention is not to prevent the hardware from making pte * updates, but to prevent any updates it may make from being lost. * * This does not protect against other software modifications of the * pte; the appropriate pte lock must be held over the transaction. * * Note that this interface is intended to be batchable, meaning that * ptep_modify_prot_commit may not actually update the pte, but merely * queue the update to be done at some later time. The update must be * actually committed before the pte lock is released, however. */ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { return __ptep_modify_prot_start(vma, addr, ptep); } /* * Commit an update to a pte, leaving any hardware-controlled bits in * the PTE unmodified. The pte returned from ptep_modify_prot_start() may * additionally have young and/or dirty bits set where previously they were not, * so the updated pte may have these additional changes. */ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte) { __ptep_modify_prot_commit(vma, addr, ptep, pte); } #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ /** * modify_prot_start_ptes - Start a pte protection read-modify-write transaction * over a batch of ptes, which protects against asynchronous hardware * modifications to the ptes. The intention is not to prevent the hardware from * making pte updates, but to prevent any updates it may make from being lost. * Please see the comment above ptep_modify_prot_start() for full description. * * @vma: The virtual memory area the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @nr: Number of entries. * * May be overridden by the architecture; otherwise, implemented as a simple * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte * in the batch. * * Note that PTE bits in the PTE batch besides the PFN can differ. * * Context: The caller holds the page table lock. The PTEs map consecutive * pages that belong to the same folio. All other PTE bits must be identical for * all PTEs in the batch except for young and dirty bits. The PTEs are all in * the same PMD. */ #ifndef modify_prot_start_ptes static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned int nr) { pte_t pte, tmp_pte; pte = ptep_modify_prot_start(vma, addr, ptep); while (--nr) { ptep++; addr += PAGE_SIZE; tmp_pte = ptep_modify_prot_start(vma, addr, ptep); if (pte_dirty(tmp_pte)) pte = pte_mkdirty(pte); if (pte_young(tmp_pte)) pte = pte_mkyoung(pte); } return pte; } #endif /** * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any * hardware-controlled bits in the PTE unmodified. * * @vma: The virtual memory area the pages are mapped into. * @addr: Address the first page is mapped at. * @ptep: Page table pointer for the first entry. * @old_pte: Old page table entry (for the first entry) which is now cleared. * @pte: New page table entry to be set. * @nr: Number of entries. * * May be overridden by the architecture; otherwise, implemented as a simple * loop over ptep_modify_prot_commit(). * * Context: The caller holds the page table lock. The PTEs are all in the same * PMD. On exit, the set ptes in the batch map the same folio. 
The ptes set by * ptep_modify_prot_start() may additionally have young and/or dirty bits set * where previously they were not, so the updated ptes may have these * additional changes. */ #ifndef modify_prot_commit_ptes static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr) { int i; for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) { ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); /* Advance PFN only, set same prot */ old_pte = pte_next_pfn(old_pte); pte = pte_next_pfn(pte); } } #endif #endif /* CONFIG_MMU */ /* * No-op macros that just return the current protection value. Defined here * because these macros can be used even if CONFIG_MMU is not defined. */ #ifndef pgprot_nx #define pgprot_nx(prot) (prot) #endif #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif #ifndef pgprot_writecombine #define pgprot_writecombine pgprot_noncached #endif #ifndef pgprot_writethrough #define pgprot_writethrough pgprot_noncached #endif #ifndef pgprot_device #define pgprot_device pgprot_noncached #endif #ifndef pgprot_mhp #define pgprot_mhp(prot) (prot) #endif #ifdef CONFIG_MMU #ifndef pgprot_modify #define pgprot_modify pgprot_modify static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) { if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) newprot = pgprot_noncached(newprot); if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) newprot = pgprot_writecombine(newprot); if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) newprot = pgprot_device(newprot); return newprot; } #endif #endif /* CONFIG_MMU */ #ifndef pgprot_encrypted #define pgprot_encrypted(prot) (prot) #endif #ifndef pgprot_decrypted #define pgprot_decrypted(prot) (prot) #endif /* * A facility to provide batching of the reload of page tables and * other process state with the actual context switch code for * paravirtualized guests. By convention, only one of the batched * update (lazy) modes (CPU, MMU) should be active at any given time, * entry should never be nested, and entry and exits should always be * paired. This is for sanity of maintaining and reasoning about the * kernel code. In this case, the exit (end of the context switch) is * in architecture-specific code, and so doesn't need a generic * definition. 
*/ #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH #define arch_start_context_switch(prev) do {} while (0) #endif #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY #ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) { return pmd; } static inline int pmd_swp_soft_dirty(pmd_t pmd) { return 0; } static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) { return pmd; } #endif #else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */ static inline int pte_soft_dirty(pte_t pte) { return 0; } static inline int pmd_soft_dirty(pmd_t pmd) { return 0; } static inline pte_t pte_mksoft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) { return pmd; } static inline pte_t pte_clear_soft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) { return pmd; } static inline pte_t pte_swp_mksoft_dirty(pte_t pte) { return pte; } static inline int pte_swp_soft_dirty(pte_t pte) { return 0; } static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) { return pte; } static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd) { return pmd; } static inline int pmd_swp_soft_dirty(pmd_t pmd) { return 0; } static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd) { return pmd; } #endif #ifndef __HAVE_PFNMAP_TRACKING /* * Interfaces that can be used by architecture code to keep track of * memory type of pfn mappings specified by the remap_pfn_range, * vmf_insert_pfn. */ static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot) { return 0; } static inline int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot) { return 0; } static inline void pfnmap_untrack(unsigned long pfn, unsigned long size) { } #else /** * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range * @pfn: the start of the pfn range * @size: the size of the pfn range in bytes * @prot: the pgprot to modify * * Lookup the cachemode for the pfn range starting at @pfn with the size * @size and store it in @prot, leaving other data in @prot unchanged. * * This allows for a hardware implementation to have fine-grained control of * memory cache behavior at page level granularity. Without a hardware * implementation, this function does nothing. * * Currently there is only one implementation for this - x86 Page Attribute * Table (PAT). See Documentation/arch/x86/pat.rst for more details. * * This function can fail if the pfn range spans pfns that require differing * cachemodes. If the pfn range was previously verified to have a single * cachemode, it is sufficient to query only a single pfn. The assumption is * that this is the case for drivers using the vmf_insert_pfn*() interface. * * Returns 0 on success and -EINVAL on error. */ int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size, pgprot_t *prot); /** * pfnmap_track - track a pfn range * @pfn: the start of the pfn range * @size: the size of the pfn range in bytes * @prot: the pgprot to track * * Requested the pfn range to be 'tracked' by a hardware implementation and * setup the cachemode in @prot similar to pfnmap_setup_cachemode(). * * This allows for fine-grained control of memory cache behaviour at page * level granularity. Tracking memory this way is persisted across VMA splits * (VMA merging does not apply for VM_PFNMAP). * * Currently, there is only one implementation for this - x86 Page Attribute * Table (PAT). See Documentation/arch/x86/pat.rst for more details. * * Returns 0 on success and -EINVAL on error. 
*/ int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot); /** * pfnmap_untrack - untrack a pfn range * @pfn: the start of the pfn range * @size: the size of the pfn range in bytes * * Untrack a pfn range previously tracked through pfnmap_track(). */ void pfnmap_untrack(unsigned long pfn, unsigned long size); #endif /** * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn * @pfn: the pfn * @prot: the pgprot to modify * * Lookup the cachemode for @pfn and store it in @prot, leaving other * data in @prot unchanged. * * See pfnmap_setup_cachemode() for details. */ static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot) { pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot); } #ifdef CONFIG_MMU #ifdef __HAVE_COLOR_ZERO_PAGE static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; unsigned long offset_from_zero_pfn = pfn - zero_pfn; return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); } #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) #else static inline int is_zero_pfn(unsigned long pfn) { extern unsigned long zero_pfn; return pfn == zero_pfn; } static inline unsigned long my_zero_pfn(unsigned long addr) { extern unsigned long zero_pfn; return zero_pfn; } #endif #else static inline int is_zero_pfn(unsigned long pfn) { return 0; } static inline unsigned long my_zero_pfn(unsigned long addr) { return 0; } #endif /* CONFIG_MMU */ #ifdef CONFIG_MMU #ifndef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_trans_huge(pmd_t pmd) { return 0; } #ifndef pmd_write static inline int pmd_write(pmd_t pmd) { BUG(); return 0; } #endif /* pmd_write */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #ifndef pud_write static inline int pud_write(pud_t pud) { BUG(); return 0; } #endif /* pud_write */ #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) static inline int pud_trans_huge(pud_t pud) { return 0; } #endif static inline int pud_trans_unstable(pud_t *pud) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pudval = READ_ONCE(*pud); if (pud_none(pudval) || pud_trans_huge(pudval)) return 1; if (unlikely(pud_bad(pudval))) { pud_clear_bad(pud); return 1; } #endif return 0; } #ifndef CONFIG_NUMA_BALANCING /* * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is * perfectly valid to indicate "no" in that case, which is why our default * implementation defaults to "always no". * * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE * page protection due to NUMA hinting. NUMA hinting faults only apply in * accessible VMAs. * * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault, * looking at the VMA accessibility is sufficient. 
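 *
 * A hedged illustration (this helper is not defined in this header): a fault
 * path can combine the two checks using vma_is_accessible() together with
 * pte_protnone(); the name pte_is_numa_hint() below is made up for the
 * example.
 *
 *	static inline bool pte_is_numa_hint(struct vm_area_struct *vma, pte_t pte)
 *	{
 *		return pte_protnone(pte) && vma_is_accessible(vma);
 *	}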
*/ static inline int pte_protnone(pte_t pte) { return 0; } static inline int pmd_protnone(pmd_t pmd) { return 0; } #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_MMU */ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #ifndef __PAGETABLE_P4D_FOLDED int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); void p4d_clear_huge(p4d_t *p4d); #else static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { return 0; } static inline void p4d_clear_huge(p4d_t *p4d) { } #endif /* !__PAGETABLE_P4D_FOLDED */ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { return 0; } static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) { return 0; } static inline void p4d_clear_huge(p4d_t *p4d) { } static inline int pud_clear_huge(pud_t *pud) { return 0; } static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) { return 0; } static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) { return 0; } static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) { return 0; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Otherwise also, it can help optimize normal TLB flush in * THP regime. Stock flush_tlb_range() typically has optimization to nuke the * entire TLB if flush span is greater than a threshold, which will * likely be true for a single huge page. Thus a single THP flush will * invalidate the entire TLB which is not desirable. * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) #else #define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() #define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() #endif #endif struct file; int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot); #ifndef CONFIG_X86_ESPFIX64 static inline void init_espfix_bsp(void) { } #endif extern void __init pgtable_cache_init(void); #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) { return true; } static inline bool arch_has_pfn_modify_check(void) { return false; } #endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ /* * Architecture PAGE_KERNEL_* fallbacks * * Some architectures don't define certain PAGE_KERNEL_* flags. This is either * because they really don't support them, or the port needs to be updated to * reflect the required functionality. Below are a set of relatively safe * fallbacks, as best effort, which we can count on in lieu of the architectures * not defining them on their own yet. */ #ifndef PAGE_KERNEL_RO # define PAGE_KERNEL_RO PAGE_KERNEL #endif #ifndef PAGE_KERNEL_EXEC # define PAGE_KERNEL_EXEC PAGE_KERNEL #endif /* * Page Table Modification bits for pgtbl_mod_mask. 
* * These are used by the p?d_alloc_track*() set of functions an in the generic * vmalloc/ioremap code to track at which page-table levels entries have been * modified. Based on that the code can better decide when vmalloc and ioremap * mapping changes need to be synchronized to other page-tables in the system. */ #define __PGTBL_PGD_MODIFIED 0 #define __PGTBL_P4D_MODIFIED 1 #define __PGTBL_PUD_MODIFIED 2 #define __PGTBL_PMD_MODIFIED 3 #define __PGTBL_PTE_MODIFIED 4 #define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED) #define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED) #define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED) #define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED) #define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED) /* Page-Table Modification Mask */ typedef unsigned int pgtbl_mod_mask; #endif /* !__ASSEMBLY__ */ #if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT) #ifdef CONFIG_PHYS_ADDR_T_64BIT /* * ZSMALLOC needs to know the highest PFN on 32-bit architectures * with physical address space extension, but falls back to * BITS_PER_LONG otherwise. */ #error Missing MAX_POSSIBLE_PHYSMEM_BITS definition #else #define MAX_POSSIBLE_PHYSMEM_BITS 32 #endif #endif #ifndef has_transparent_hugepage #define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE) #endif #ifndef has_transparent_pud_hugepage #define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) #endif /* * On some architectures it depends on the mm if the p4d/pud or pmd * layer of the page table hierarchy is folded or not. */ #ifndef mm_p4d_folded #define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED) #endif #ifndef mm_pud_folded #define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED) #endif #ifndef mm_pmd_folded #define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) #endif #ifndef p4d_offset_lockless #define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) #endif #ifndef pud_offset_lockless #define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) #endif #ifndef pmd_offset_lockless #define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) #endif /* * pXd_leaf() is the API to check whether a pgtable entry is a huge page * mapping. It should work globally across all archs, without any * dependency on CONFIG_* options. For architectures that do not support * huge mappings on specific levels, below fallbacks will be used. * * A leaf pgtable entry should always imply the following: * * - It is a "present" entry. IOW, before using this API, please check it * with pXd_present() first. NOTE: it may not always mean the "present * bit" is set. For example, PROT_NONE entries are always "present". * * - It should _never_ be a swap entry of any type. Above "present" check * should have guarded this, but let's be crystal clear on this. * * - It should contain a huge PFN, which points to a huge page larger than * PAGE_SIZE of the platform. The PFN format isn't important here. * * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge() * or hugetlb mappings). 
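 *
 * A minimal usage sketch (illustrative only, not code from this header): a
 * generic walker can stop descending once it sees a present leaf entry and
 * take the extent from the matching pXd_leaf_size() fallback defined below:
 *
 *	if (pud_present(pud) && pud_leaf(pud))
 *		size = pud_leaf_size(pud);	/* mapping covers this much */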
*/ #ifndef pgd_leaf #define pgd_leaf(x) false #endif #ifndef p4d_leaf #define p4d_leaf(x) false #endif #ifndef pud_leaf #define pud_leaf(x) false #endif #ifndef pmd_leaf #define pmd_leaf(x) false #endif #ifndef pgd_leaf_size #define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT) #endif #ifndef p4d_leaf_size #define p4d_leaf_size(x) P4D_SIZE #endif #ifndef pud_leaf_size #define pud_leaf_size(x) PUD_SIZE #endif #ifndef pmd_leaf_size #define pmd_leaf_size(x) PMD_SIZE #endif #ifndef __pte_leaf_size #ifndef pte_leaf_size #define pte_leaf_size(x) PAGE_SIZE #endif #define __pte_leaf_size(x,y) pte_leaf_size(y) #endif /* * We always define pmd_pfn for all archs as it's used in lots of generic * code. Now it happens too for pud_pfn (and can happen for larger * mappings too in the future; we're not there yet). Instead of defining * it for all archs (like pmd_pfn), provide a fallback. * * Note that returning 0 here means any arch that didn't define this can * get severely wrong when it hits a real pud leaf. It's arch's * responsibility to properly define it when a huge pud is possible. */ #ifndef pud_pfn #define pud_pfn(x) 0 #endif /* * Some architectures have MMUs that are configurable or selectable at boot * time. These lead to variable PTRS_PER_x. For statically allocated arrays it * helps to have a static maximum value. */ #ifndef MAX_PTRS_PER_PTE #define MAX_PTRS_PER_PTE PTRS_PER_PTE #endif #ifndef MAX_PTRS_PER_PMD #define MAX_PTRS_PER_PMD PTRS_PER_PMD #endif #ifndef MAX_PTRS_PER_PUD #define MAX_PTRS_PER_PUD PTRS_PER_PUD #endif #ifndef MAX_PTRS_PER_P4D #define MAX_PTRS_PER_P4D PTRS_PER_P4D #endif #ifndef pte_pgprot #define pte_pgprot(x) ((pgprot_t) {0}) #endif #ifndef pmd_pgprot #define pmd_pgprot(x) ((pgprot_t) {0}) #endif #ifndef pud_pgprot #define pud_pgprot(x) ((pgprot_t) {0}) #endif /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. The expected * behavior is in parens: * * map_type prot * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (yes) yes w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes * w: (no) no w: (no) no w: (copy) copy w: (no) no * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and * MAP_PRIVATE (with Enhanced PAN supported): * r: (no) no * w: (no) no * x: (yes) yes */ #define DECLARE_VM_GET_PAGE_PROT \ pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \ { \ return protection_map[vm_flags & \ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ } \ EXPORT_SYMBOL(vm_get_page_prot); #endif /* _LINUX_PGTABLE_H */ |
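/*
 * Hedged example of the arch-side counterpart to DECLARE_VM_GET_PAGE_PROT
 * above: an architecture fills in a 16-entry protection_map indexed by the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination and then instantiates
 * vm_get_page_prot() with the macro.  This sketch is not taken from any
 * particular architecture; the PAGE_NONE/PAGE_READONLY/PAGE_COPY/PAGE_SHARED
 * (and *_EXEC) names stand in for whatever pgprot constants the port actually
 * defines, following the read/write/copy behaviour tabulated in the comment
 * above.
 */
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC,
};
DECLARE_VM_GET_PAGE_PROT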
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017-2018 HUAWEI, Inc. * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ #ifndef __EROFS_INTERNAL_H #define __EROFS_INTERNAL_H #include <linux/fs.h> #include <linux/dax.h> #include <linux/dcache.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/bio.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/iomap.h> #include "erofs_fs.h" __printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...); #define erofs_err(sb, fmt, ...) \ _erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__) #define erofs_info(sb, fmt, ...)
\ _erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__) #ifdef CONFIG_EROFS_FS_DEBUG #define DBG_BUGON BUG_ON #else #define DBG_BUGON(x) ((void)(x)) #endif /* !CONFIG_EROFS_FS_DEBUG */ /* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ #define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 typedef u64 erofs_nid_t; typedef u64 erofs_off_t; typedef u64 erofs_blk_t; struct erofs_device_info { char *path; struct erofs_fscache *fscache; struct file *file; struct dax_device *dax_dev; u64 fsoff, dax_part_off; erofs_blk_t blocks; erofs_blk_t uniaddr; }; enum { EROFS_SYNC_DECOMPRESS_AUTO, EROFS_SYNC_DECOMPRESS_FORCE_ON, EROFS_SYNC_DECOMPRESS_FORCE_OFF }; struct erofs_mount_opts { /* current strategy of how to use managed cache */ unsigned char cache_strategy; /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */ unsigned int sync_decompress; /* threshold for decompression synchronously */ unsigned int max_sync_decompress_pages; unsigned int mount_opt; }; struct erofs_dev_context { struct idr tree; struct rw_semaphore rwsem; unsigned int extra_devices; bool flatdev; }; /* all filesystem-wide lz4 configurations */ struct erofs_sb_lz4_info { /* # of pages needed for EROFS lz4 rolling decompression */ u16 max_distance_pages; /* maximum possible blocks for pclusters in the filesystem */ u16 max_pclusterblks; }; struct erofs_domain { refcount_t ref; struct list_head list; struct fscache_volume *volume; char *domain_id; }; struct erofs_fscache { struct fscache_cookie *cookie; struct inode *inode; /* anonymous inode for the blob */ /* used for share domain mode */ struct erofs_domain *domain; struct list_head node; refcount_t ref; char *name; }; struct erofs_xattr_prefix_item { struct erofs_xattr_long_prefix *prefix; u8 infix_len; }; struct erofs_sb_info { struct erofs_device_info dif0; struct erofs_mount_opts opt; /* options */ #ifdef CONFIG_EROFS_FS_ZIP /* list for all registered superblocks, mainly for shrinker */ struct list_head list; struct mutex umount_mutex; /* managed XArray arranged in physical block number */ struct xarray managed_pslots; unsigned int shrinker_run_no; u16 available_compr_algs; /* pseudo inode to manage cached pages */ struct inode *managed_cache; struct erofs_sb_lz4_info lz4; #endif /* CONFIG_EROFS_FS_ZIP */ struct inode *packed_inode; struct inode *metabox_inode; struct erofs_dev_context *devs; u64 total_blocks; u32 meta_blkaddr; #ifdef CONFIG_EROFS_FS_XATTR u32 xattr_blkaddr; u32 xattr_prefix_start; u8 xattr_prefix_count; struct erofs_xattr_prefix_item *xattr_prefixes; unsigned int xattr_filter_reserved; #endif u16 device_id_mask; /* valid bits of device id to be used */ unsigned char islotbits; /* inode slot unit size in bit shift */ unsigned char blkszbits; /* filesystem block size in bit shift */ u32 sb_size; /* total superblock size */ u32 fixed_nsec; s64 epoch; /* what we really care is nid, rather than ino.. 
*/ erofs_nid_t root_nid; erofs_nid_t packed_nid; erofs_nid_t metabox_nid; /* used for statfs, f_files - f_favail */ u64 inos; u32 feature_compat; u32 feature_incompat; /* sysfs support */ struct kobject s_kobj; /* /sys/fs/erofs/<devname> */ struct completion s_kobj_unregister; erofs_off_t dir_ra_bytes; /* fscache support */ struct fscache_volume *volume; struct erofs_domain *domain; char *fsid; char *domain_id; }; #define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) #define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) /* Mount flags set via mount options or defaults */ #define EROFS_MOUNT_XATTR_USER 0x00000010 #define EROFS_MOUNT_POSIX_ACL 0x00000020 #define EROFS_MOUNT_DAX_ALWAYS 0x00000040 #define EROFS_MOUNT_DAX_NEVER 0x00000080 #define EROFS_MOUNT_DIRECT_IO 0x00000100 #define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option) #define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option) #define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option) static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi) { return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file; } static inline bool erofs_is_fscache_mode(struct super_block *sb) { return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev; } enum { EROFS_ZIP_CACHE_DISABLED, EROFS_ZIP_CACHE_READAHEAD, EROFS_ZIP_CACHE_READAROUND }; struct erofs_buf { struct address_space *mapping; struct file *file; u64 off; struct page *page; void *base; }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) #define erofs_blknr(sb, pos) ((erofs_blk_t)((pos) >> (sb)->s_blocksize_bits)) #define erofs_blkoff(sb, pos) ((pos) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) #define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits) #define EROFS_FEATURE_FUNCS(name, compat, feature) \ static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \ { \ return sbi->feature_##compat & EROFS_FEATURE_##feature; \ } EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING) EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS) EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER) EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE) EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE) EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2) EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING) EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS) EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE) EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES) EROFS_FEATURE_FUNCS(48bit, incompat, INCOMPAT_48BIT) EROFS_FEATURE_FUNCS(metabox, incompat, INCOMPAT_METABOX) EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM) EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER) EROFS_FEATURE_FUNCS(shared_ea_in_metabox, compat, COMPAT_SHARED_EA_IN_METABOX) static inline u64 erofs_nid_to_ino64(struct erofs_sb_info *sbi, erofs_nid_t nid) { if (!erofs_sb_has_metabox(sbi)) return nid; /* * When metadata compression is enabled, avoid generating excessively * large inode numbers for metadata-compressed inodes. Shift NIDs in * the 31-62 bit range left by one and move the metabox flag to bit 31. * * Note: on-disk NIDs remain unchanged as they are primarily used for * compatibility with non-LFS 32-bit applications. 
*/ return ((nid << 1) & GENMASK_ULL(63, 32)) | (nid & GENMASK(30, 0)) | ((nid >> EROFS_DIRENT_NID_METABOX_BIT) << 31); } /* atomic flag definitions */ #define EROFS_I_EA_INITED_BIT 0 #define EROFS_I_Z_INITED_BIT 1 /* bitlock definitions (arranged in reverse order) */ #define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1) #define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2) /* default readahead size of directories */ #define EROFS_DIR_RA_BYTES 16384 struct erofs_inode { erofs_nid_t nid; /* atomic flags (including bitlocks) */ unsigned long flags; unsigned char datalayout; unsigned char inode_isize; bool dot_omitted; unsigned int xattr_isize; unsigned int xattr_name_filter; unsigned int xattr_shared_count; unsigned int *xattr_shared_xattrs; union { erofs_blk_t startblk; struct { unsigned short chunkformat; unsigned char chunkbits; }; #ifdef CONFIG_EROFS_FS_ZIP struct { unsigned short z_advise; unsigned char z_algorithmtype[2]; unsigned char z_lclusterbits; union { u64 z_tailextent_headlcn; u64 z_extents; }; erofs_off_t z_fragmentoff; unsigned short z_idata_size; }; #endif /* CONFIG_EROFS_FS_ZIP */ }; /* the corresponding vfs inode */ struct inode vfs_inode; }; #define EROFS_I(ptr) container_of(ptr, struct erofs_inode, vfs_inode) static inline bool erofs_inode_in_metabox(struct inode *inode) { return EROFS_I(inode)->nid & BIT_ULL(EROFS_DIRENT_NID_METABOX_BIT); } static inline erofs_off_t erofs_iloc(struct inode *inode) { struct erofs_sb_info *sbi = EROFS_I_SB(inode); erofs_nid_t nid_lo = EROFS_I(inode)->nid & EROFS_DIRENT_NID_MASK; if (erofs_inode_in_metabox(inode)) return nid_lo << sbi->islotbits; return erofs_pos(inode->i_sb, sbi->meta_blkaddr) + (nid_lo << sbi->islotbits); } static inline unsigned int erofs_inode_version(unsigned int ifmt) { return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK; } static inline unsigned int erofs_inode_datalayout(unsigned int ifmt) { return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK; } /* reclaiming is never triggered when allocating new folios. */ static inline struct folio *erofs_grab_folio_nowait(struct address_space *as, pgoff_t index) { return __filemap_get_folio(as, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, readahead_gfp_mask(as) & ~__GFP_RECLAIM); } /* Has a disk mapping */ #define EROFS_MAP_MAPPED 0x0001 /* Located in metadata (could be copied from bd_inode) */ #define EROFS_MAP_META 0x0002 /* The extent is encoded */ #define EROFS_MAP_ENCODED 0x0004 /* The length of extent is full */ #define EROFS_MAP_FULL_MAPPED 0x0008 /* Located in the special packed inode */ #define __EROFS_MAP_FRAGMENT 0x0010 /* The extent refers to partial decompressed data */ #define EROFS_MAP_PARTIAL_REF 0x0020 #define EROFS_MAP_FRAGMENT (EROFS_MAP_MAPPED | __EROFS_MAP_FRAGMENT) struct erofs_map_blocks { struct erofs_buf buf; erofs_off_t m_pa, m_la; u64 m_plen, m_llen; unsigned short m_deviceid; char m_algorithmformat; unsigned int m_flags; }; /* * Used to get the exact decompressed length, e.g. fiemap (consider lookback * approach instead if possible since it's more metadata lightweight.) 
*/ #define EROFS_GET_BLOCKS_FIEMAP 0x0001 /* Used to map the whole extent if non-negligible data is requested for LZMA */ #define EROFS_GET_BLOCKS_READMORE 0x0002 /* Used to map tail extent for tailpacking inline or fragment pcluster */ #define EROFS_GET_BLOCKS_FINDTAIL 0x0004 enum { Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX, Z_EROFS_COMPRESSION_INTERLACED, Z_EROFS_COMPRESSION_RUNTIME_MAX }; struct erofs_map_dev { struct super_block *m_sb; struct erofs_device_info *m_dif; struct block_device *m_bdev; erofs_off_t m_pa; unsigned int m_deviceid; }; extern const struct super_operations erofs_sops; extern const struct address_space_operations erofs_aops; extern const struct address_space_operations erofs_fileio_aops; extern const struct address_space_operations z_erofs_aops; extern const struct address_space_operations erofs_fscache_access_aops; extern const struct inode_operations erofs_generic_iops; extern const struct inode_operations erofs_symlink_iops; extern const struct inode_operations erofs_fast_symlink_iops; extern const struct inode_operations erofs_dir_iops; extern const struct file_operations erofs_file_fops; extern const struct file_operations erofs_dir_fops; extern const struct iomap_ops z_erofs_iomap_report_ops; /* flags for erofs_fscache_register_cookie() */ #define EROFS_REG_COOKIE_SHARE 0x0001 #define EROFS_REG_COOKIE_NEED_NOEXIST 0x0002 void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, erofs_off_t *offset, int *lengthp); void erofs_unmap_metabuf(struct erofs_buf *buf); void erofs_put_metabuf(struct erofs_buf *buf); void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, bool need_kmap); int erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb, bool in_metabox); void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, erofs_off_t offset, bool in_metabox); int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev); int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map); void erofs_onlinefolio_init(struct folio *folio); void erofs_onlinefolio_split(struct folio *folio); void erofs_onlinefolio_end(struct folio *folio, int err, bool dirty); struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid); int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags); int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid, unsigned int *d_type); static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count) { int retried = 0; while (1) { void *p = vm_map_ram(pages, count, -1); /* retry two more times (totally 3 times) */ if (p || ++retried >= 3) return p; vm_unmap_aliases(); } return NULL; } int erofs_register_sysfs(struct super_block *sb); void erofs_unregister_sysfs(struct super_block *sb); int __init erofs_init_sysfs(void); void erofs_exit_sysfs(void); struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv); static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) { return __erofs_allocpage(pagepool, gfp, false); } static inline void erofs_pagepool_add(struct page **pagepool, struct page *page) { set_page_private(page, (unsigned long)*pagepool); *pagepool = page; } void erofs_release_pages(struct page **pagepool); #ifdef CONFIG_EROFS_FS_ZIP #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) extern atomic_long_t 
erofs_global_shrink_cnt; void erofs_shrinker_register(struct super_block *sb); void erofs_shrinker_unregister(struct super_block *sb); int __init erofs_init_shrinker(void); void erofs_exit_shrinker(void); int __init z_erofs_init_subsystem(void); void z_erofs_exit_subsystem(void); int z_erofs_init_super(struct super_block *sb); unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr_shrink); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags); void *z_erofs_get_gbuf(unsigned int requiredpages); void z_erofs_put_gbuf(void *ptr); int z_erofs_gbuf_growsize(unsigned int nrpages); int __init z_erofs_gbuf_init(void); void z_erofs_gbuf_exit(void); int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb); #else static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_subsystem(void) { return 0; } static inline void z_erofs_exit_subsystem(void) {} static inline int z_erofs_init_super(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */ #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev); void erofs_fileio_submit_bio(struct bio *bio); #else static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } static inline void erofs_fileio_submit_bio(struct bio *bio) {} #endif #ifdef CONFIG_EROFS_FS_ONDEMAND int erofs_fscache_register_fs(struct super_block *sb); void erofs_fscache_unregister_fs(struct super_block *sb); struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, char *name, unsigned int flags); void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache); struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev); void erofs_fscache_submit_bio(struct bio *bio); #else static inline int erofs_fscache_register_fs(struct super_block *sb) { return -EOPNOTSUPP; } static inline void erofs_fscache_unregister_fs(struct super_block *sb) {} static inline struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, char *name, unsigned int flags) { return ERR_PTR(-EOPNOTSUPP); } static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache) { } static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } static inline void erofs_fscache_submit_bio(struct bio *bio) {} #endif #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ #endif /* __EROFS_INTERNAL_H */ |
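/*
 * For clarity, one instantiation of the EROFS_FEATURE_FUNCS() generator
 * above, hand-expanded (nothing new is being defined here):
 *
 *	EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
 *
 * becomes
 *
 *	static inline bool erofs_sb_has_fragments(struct erofs_sb_info *sbi)
 *	{
 *		return sbi->feature_incompat & EROFS_FEATURE_INCOMPAT_FRAGMENTS;
 *	}
 *
 * so callers simply test the superblock feature bit, e.g.
 *	if (erofs_sb_has_fragments(EROFS_SB(sb)))
 *		...
 */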
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_BKEY_SORT_H #define _BCACHEFS_BKEY_SORT_H struct sort_iter { struct btree *b; unsigned used; unsigned size; struct sort_iter_set { struct bkey_packed *k, *end; } data[]; }; static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size) { iter->b = b; iter->used = 0; iter->size = size; } struct sort_iter_stack { struct sort_iter iter; struct sort_iter_set sets[MAX_BSETS + 1]; }; static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b) { sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets)); } static inline void sort_iter_add(struct sort_iter *iter, struct bkey_packed *k, struct bkey_packed *end) { BUG_ON(iter->used >= iter->size); if (k != end) iter->data[iter->used++] = (struct sort_iter_set) { k, end }; } struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bch_fs *, struct bset *, struct sort_iter *); struct btree_nr_keys bch2_sort_repack(struct bset *, struct btree *, struct btree_node_iter *, struct bkey_format *, bool); unsigned bch2_sort_keys_keep_unwritten_whiteouts(struct bkey_packed *, struct sort_iter *); unsigned bch2_sort_keys(struct bkey_packed *, struct sort_iter *); #endif /* _BCACHEFS_BKEY_SORT_H */
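/*
 * Hedged usage sketch for the sort_iter machinery above: the caller adds one
 * [start, end) key range per source and hands the iterator to one of the
 * bch2_sort_*() routines.  The btree_bkey_first()/btree_bkey_last() accessors
 * and the b->set/b->nsets fields are assumptions standing in for however the
 * caller enumerates its in-memory bsets; this function is not from the tree.
 */
static unsigned example_sort_node_keys(struct bkey_packed *dst, struct btree *b)
{
	struct sort_iter_stack stack;
	unsigned i;

	sort_iter_stack_init(&stack, b);
	for (i = 0; i < b->nsets; i++)
		/* one key range per in-memory bset of the node */
		sort_iter_add(&stack.iter,
			      btree_bkey_first(b, b->set + i),
			      btree_bkey_last(b, b->set + i));

	return bch2_sort_keys(dst, &stack.iter);
}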
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SWAP_H #define _LINUX_SWAP_H #include <linux/spinlock.h> #include <linux/linkage.h> #include <linux/mmzone.h> #include <linux/list.h> #include <linux/memcontrol.h> #include <linux/sched.h> #include <linux/node.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/atomic.h> #include <linux/page-flags.h> #include <uapi/linux/mempolicy.h> #include <asm/page.h> struct notifier_block; struct bio; struct pagevec; #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */ #define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */ #define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */ #define SWAP_FLAGS_VALID
(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \ SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \ SWAP_FLAG_DISCARD_PAGES) #define SWAP_BATCH 64 static inline int current_is_kswapd(void) { return current->flags & PF_KSWAPD; } /* * MAX_SWAPFILES defines the maximum number of swaptypes: things which can * be swapped to. The swap type and the offset into that swap type are * encoded into pte's and into pgoff_t's in the swapcache. Using five bits * for the type means that the maximum number of swapcache pages is 27 bits * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs * the type/offset into the pte as 5/27 as well. */ #define MAX_SWAPFILES_SHIFT 5 /* * Use some of the swap files numbers for other purposes. This * is a convenient way to hook into the VM to trigger special * actions on faults. */ /* * PTE markers are used to persist information onto PTEs that otherwise * should be a none pte. As its name "PTE" hints, it should only be * applied to the leaves of pgtables. */ #define SWP_PTE_MARKER_NUM 1 #define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \ SWP_MIGRATION_NUM + SWP_DEVICE_NUM) /* * Unaddressable device memory support. See include/linux/hmm.h and * Documentation/mm/hmm.rst. Short description is we need struct pages for * device memory that is unaddressable (inaccessible) by CPU, so that we can * migrate part of a process memory to device memory. * * When a page is migrated from CPU to device, we set the CPU page table entry * to a special SWP_DEVICE_{READ|WRITE} entry. * * When a page is mapped by the device for exclusive access we set the CPU page * table entries to a special SWP_DEVICE_EXCLUSIVE entry. */ #ifdef CONFIG_DEVICE_PRIVATE #define SWP_DEVICE_NUM 3 #define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM) #define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1) #define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2) #else #define SWP_DEVICE_NUM 0 #endif /* * Page migration support. * * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and * indicates that the referenced (part of) an anonymous page is exclusive to * a single process. For SWP_MIGRATION_WRITE, that information is implicit: * (part of) an anonymous page that are mapped writable are exclusive to a * single process. */ #ifdef CONFIG_MIGRATION #define SWP_MIGRATION_NUM 3 #define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM) #define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1) #define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2) #else #define SWP_MIGRATION_NUM 0 #endif /* * Handling of hardware poisoned pages with memory corruption. */ #ifdef CONFIG_MEMORY_FAILURE #define SWP_HWPOISON_NUM 1 #define SWP_HWPOISON MAX_SWAPFILES #else #define SWP_HWPOISON_NUM 0 #endif #define MAX_SWAPFILES \ ((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \ SWP_PTE_MARKER_NUM) /* * Magic header for a swap area. The first part of the union is * what the swap magic looks like for the old (limited to 128MB) * swap area format, the second part of the union adds - in the * old reserved area - some extra information. Note that the first * kilobyte is reserved for boot loader or disk label stuff... * * Having the magic at the end of the PAGE_SIZE makes detecting swap * areas somewhat tricky on machines that support multiple page sizes. * For 2.5 we'll probably want to move the magic to just beyond the * bootbits... 
*/ union swap_header { struct { char reserved[PAGE_SIZE - 10]; char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */ } magic; struct { char bootbits[1024]; /* Space for disklabel etc. */ __u32 version; __u32 last_page; __u32 nr_badpages; unsigned char sws_uuid[16]; unsigned char sws_volume[16]; __u32 padding[117]; __u32 badpages[1]; } info; }; /* * current->reclaim_state points to one of these when a task is running * memory reclaim */ struct reclaim_state { /* pages reclaimed outside of LRU-based reclaim */ unsigned long reclaimed; #ifdef CONFIG_LRU_GEN /* per-thread mm walk data */ struct lru_gen_mm_walk *mm_walk; #endif }; /* * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based * reclaim * @pages: number of pages reclaimed * * If the current process is undergoing a reclaim operation, increment the * number of reclaimed pages by @pages. */ static inline void mm_account_reclaimed_pages(unsigned long pages) { if (current->reclaim_state) current->reclaim_state->reclaimed += pages; } #ifdef __KERNEL__ struct address_space; struct sysinfo; struct writeback_control; struct zone; /* * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of * disk blocks. A rbtree of swap extents maps the entire swapfile (Where the * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart * from setup, they're handled identically. * * We always assume that blocks are of size PAGE_SIZE. */ struct swap_extent { struct rb_node rb_node; pgoff_t start_page; pgoff_t nr_pages; sector_t start_block; }; /* * Max bad pages in the new format.. */ #define MAX_SWAP_BADPAGES \ ((offsetof(union swap_header, magic.magic) - \ offsetof(union swap_header, info.badpages)) / sizeof(int)) enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ SWP_DISCARDABLE = (1 << 2), /* blkdev support discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ SWP_BLKDEV = (1 << 6), /* its a block device */ SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */ SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */ SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */ SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */ SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ /* add others here before... */ }; #define SWAP_CLUSTER_MAX 32UL #define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10) #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX /* Bit flag in swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ #define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */ /* Special value in first swap_map */ #define SWAP_MAP_MAX 0x3e /* Max count */ #define SWAP_MAP_BAD 0x3f /* Note page is bad */ #define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */ /* Special value in each swap_map continuation */ #define SWAP_CONT_MAX 0x7f /* Max count */ /* * We use this to track usage of a cluster. A cluster is a block of swap disk * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All * free clusters are organized into a list. We fetch an entry from the list to * get a free cluster. * * The flags field determines if a cluster is free. This is * protected by cluster lock. 
*/ struct swap_cluster_info { spinlock_t lock; /* * Protect swap_cluster_info fields * other than list, and swap_info_struct->swap_map * elements corresponding to the swap cluster. */ u16 count; u8 flags; u8 order; struct list_head list; }; /* All on-list cluster must have a non-zero flag. */ enum swap_cluster_flags { CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */ CLUSTER_FLAG_FREE, CLUSTER_FLAG_NONFULL, CLUSTER_FLAG_FRAG, /* Clusters with flags above are allocatable */ CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG, CLUSTER_FLAG_FULL, CLUSTER_FLAG_DISCARD, CLUSTER_FLAG_MAX, }; /* * The first page in the swap file is the swap header, which is always marked * bad to prevent it from being allocated as an entry. This also prevents the * cluster to which it belongs being marked free. Therefore 0 is safe to use as * a sentinel to indicate an entry is not valid. */ #define SWAP_ENTRY_INVALID 0 #ifdef CONFIG_THP_SWAP #define SWAP_NR_ORDERS (PMD_ORDER + 1) #else #define SWAP_NR_ORDERS 1 #endif /* * We keep using same cluster for rotational device so IO will be sequential. * The purpose is to optimize SWAP throughput on these device. */ struct swap_sequential_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; /* * The in-memory structure used to track swap areas. */ struct swap_info_struct { struct percpu_ref users; /* indicate and keep swap device valid. */ unsigned long flags; /* SWP_USED etc: see above */ signed short prio; /* swap priority of this type */ struct plist_node list; /* entry in swap_active_head */ signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */ struct list_head free_clusters; /* free clusters list */ struct list_head full_clusters; /* full clusters list */ struct list_head nonfull_clusters[SWAP_NR_ORDERS]; /* list of cluster that contains at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of cluster that are fragmented or contented */ atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ struct completion comp; /* seldom referenced */ spinlock_t lock; /* * protect map scan related fields like * swap_map, lowest_bit, highest_bit, * inuse_pages, cluster_next, * cluster_nr, lowest_alloc, * highest_alloc, free/discard cluster * list. other fields are only changed * at swapon/swapoff, so are protected * by swap_lock. changing flags need * hold this lock and swap_lock. If * both locks need hold, hold swap_lock * first. */ spinlock_t cont_lock; /* * protect swap count continuation page * list. */ struct work_struct discard_work; /* discard worker */ struct work_struct reclaim_work; /* reclaim worker */ struct list_head discard_clusters; /* discard clusters list */ struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node. 
* Must be last as the number of the * array is nr_node_ids, which is not * a fixed value so have to allocate * dynamically. * And it has to be an array so that * plist_for_each_* can work. */ }; static inline swp_entry_t page_swap_entry(struct page *page) { struct folio *folio = page_folio(page); swp_entry_t entry = folio->swap; entry.val += folio_page_idx(folio, page); return entry; } /* linux/mm/workingset.c */ bool workingset_test_recent(void *shadow, bool file, bool *workingset, bool flush); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); void workingset_activation(struct folio *folio); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) /* linux/mm/swap.c */ void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, unsigned int nr_io, unsigned int nr_rotated) __releases(lruvec->lru_lock); void lru_note_cost_refault(struct folio *); void folio_add_lru(struct folio *); void folio_add_lru_vma(struct folio *, struct vm_area_struct *); void mark_page_accessed(struct page *); void folio_mark_accessed(struct folio *); extern atomic_t lru_disable_count; static inline bool lru_cache_disabled(void) { return atomic_read(&lru_disable_count); } static inline void lru_cache_enable(void) { atomic_dec(&lru_disable_count); } extern void lru_cache_disable(void); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); #define MEMCG_RECLAIM_MAY_SWAP (1 << 1) #define MEMCG_RECLAIM_PROACTIVE (1 << 2) #define MIN_SWAPPINESS 0 #define MAX_SWAPPINESS 200 /* Just reclaim from anon folios in proactive memory reclaim */ #define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1) extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, unsigned int reclaim_options, int *swappiness); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; long remove_mapping(struct address_space *mapping, struct folio *folio); #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) extern int reclaim_register_node(struct node *node); extern void reclaim_unregister_node(struct node *node); #else static inline int reclaim_register_node(struct node *node) { return 0; } static inline void reclaim_unregister_node(struct node *node) { } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ #ifdef CONFIG_NUMA extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; #endif void check_move_unevictable_folios(struct folio_batch *fbatch); extern void __meminit kswapd_run(int nid); extern void __meminit kswapd_stop(int nid); #ifdef CONFIG_SWAP int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block); int 
generic_swapfile_activate(struct swap_info_struct *, struct file *, sector_t *); static inline unsigned long total_swapcache_pages(void) { return global_node_page_state(NR_SWAPCACHE); } void free_swap_cache(struct folio *folio); void free_folio_and_swap_cache(struct folio *folio); void free_pages_and_swap_cache(struct encoded_page **, int); /* linux/mm/swapfile.c */ extern atomic_long_t nr_swap_pages; extern long total_swap_pages; extern atomic_t nr_rotate_swap; /* Swap 50% full? Release swapcache more aggressively.. */ static inline bool vm_swap_full(void) { return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages; } static inline long get_nr_swap_pages(void) { return atomic_long_read(&nr_swap_pages); } extern void si_swapinfo(struct sysinfo *); int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); struct swap_info_struct *swp_swap_info(swp_entry_t entry); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); extern void exit_swap_address_space(unsigned int type); extern struct swap_info_struct *get_swap_device(swp_entry_t entry); sector_t swap_folio_sector(struct folio *folio); static inline void put_swap_device(struct swap_info_struct *si) { percpu_ref_put(&si->users); } #else /* CONFIG_SWAP */ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry) { return NULL; } static inline struct swap_info_struct *get_swap_device(swp_entry_t entry) { return NULL; } static inline void put_swap_device(struct swap_info_struct *si) { } #define get_nr_swap_pages() 0L #define total_swap_pages 0L #define total_swapcache_pages() 0UL #define vm_swap_full() 0 #define si_swapinfo(val) \ do { (val)->freeswap = (val)->totalswap = 0; } while (0) #define free_folio_and_swap_cache(folio) \ folio_put(folio) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) { } static inline void free_swap_cache(struct folio *folio) { } static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { return 0; } static inline void swap_shmem_alloc(swp_entry_t swp, int nr) { } static inline int swap_duplicate(swp_entry_t swp) { return 0; } static inline int swapcache_prepare(swp_entry_t swp, int nr) { return 0; } static inline void swap_free_nr(swp_entry_t entry, int nr_pages) { } static inline void put_swap_folio(struct folio *folio, swp_entry_t swp) { } static inline int __swap_count(swp_entry_t entry) { return 0; } static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { return false; } static inline int swp_swapcount(swp_entry_t entry) { return 0; } static inline int folio_alloc_swap(struct folio 
*folio, gfp_t gfp_mask) { return -EINVAL; } static inline bool folio_free_swap(struct folio *folio) { return false; } static inline int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block) { return -EINVAL; } #endif /* CONFIG_SWAP */ static inline void free_swap_and_cache(swp_entry_t entry) { free_swap_and_cache_nr(entry, 1); } static inline void swap_free(swp_entry_t entry) { swap_free_nr(entry, 1); } #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { /* Cgroup2 doesn't have per-cgroup swappiness */ if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) return READ_ONCE(vm_swappiness); /* root ? */ if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) return READ_ONCE(vm_swappiness); return READ_ONCE(memcg->swappiness); } #else static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) { return READ_ONCE(vm_swappiness); } #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp); static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { if (mem_cgroup_disabled()) return; __folio_throttle_swaprate(folio, gfp); } #else static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { } #endif #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry); static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { if (mem_cgroup_disabled()) return 0; return __mem_cgroup_try_charge_swap(folio, entry); } extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { if (mem_cgroup_disabled()) return; __mem_cgroup_uncharge_swap(entry, nr_pages); } extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct folio *folio); #else static inline int mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) { return 0; } static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { } static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) { return get_nr_swap_pages(); } static inline bool mem_cgroup_swap_full(struct folio *folio) { return vm_swap_full(); } #endif #endif /* __KERNEL__*/ #endif /* _LINUX_SWAP_H */ |
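/*
 * Worked example of the swap-type arithmetic above, assuming a config with
 * CONFIG_MEMORY_FAILURE, CONFIG_MIGRATION and CONFIG_DEVICE_PRIVATE all
 * enabled (the numbers follow directly from the #defines in this header):
 *
 *	MAX_SWAPFILES = (1 << 5) - SWP_DEVICE_NUM (3) - SWP_MIGRATION_NUM (3)
 *			- SWP_HWPOISON_NUM (1) - SWP_PTE_MARKER_NUM (1) = 24
 *
 *	type  0..23	regular swap devices / swap files
 *	type 24		SWP_HWPOISON
 *	type 25		SWP_MIGRATION_READ
 *	type 26		SWP_MIGRATION_READ_EXCLUSIVE
 *	type 27		SWP_MIGRATION_WRITE
 *	type 28		SWP_DEVICE_WRITE
 *	type 29		SWP_DEVICE_READ
 *	type 30		SWP_DEVICE_EXCLUSIVE
 *	type 31		SWP_PTE_MARKER
 */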
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/fs_struct.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/prefetch.h> #include "mount.h" #include "internal.h" struct prepend_buffer { char *buf; int len; }; #define DECLARE_BUFFER(__name, __buf, __len) \ struct prepend_buffer __name = {.buf = __buf + __len, .len = __len} static char *extract_string(struct prepend_buffer *p) { if (likely(p->len >= 0)) return p->buf; return ERR_PTR(-ENAMETOOLONG); } static bool prepend_char(struct prepend_buffer *p, unsigned char c) { if (likely(p->len > 0)) { p->len--; *--p->buf = c; return true; } p->len = -1; return false; } /* * The source of the prepend data can be an optimistic load * of a dentry name and length. And because we don't hold any * locks, the length and the pointer to the name may not be * in sync if a concurrent rename happens, and the kernel * copy might fault as a result. * * The end result will correct itself when we check the * rename sequence count, but we need to be able to handle * the fault gracefully. */ static bool prepend_copy(void *dst, const void *src, int len) { if (unlikely(copy_from_kernel_nofault(dst, src, len))) { memset(dst, 'x', len); return false; } return true; } static bool prepend(struct prepend_buffer *p, const char *str, int namelen) { // Already overflowed? if (p->len < 0) return false; // Will overflow?
if (p->len < namelen) { // Fill as much as possible from the end of the name str += namelen - p->len; p->buf -= p->len; prepend_copy(p->buf, str, p->len); p->len = -1; return false; } // Fits fully p->len -= namelen; p->buf -= namelen; return prepend_copy(p->buf, str, namelen); } /** * prepend_name - prepend a pathname in front of current buffer pointer * @p: prepend buffer which contains buffer pointer and allocated length * @name: name string and length qstr structure * * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to * make sure that either the old or the new name pointer and length are * fetched. However, there may be mismatch between length and pointer. * But since the length cannot be trusted, we need to copy the name very * carefully when doing the prepend_copy(). It also prepends "/" at * the beginning of the name. The sequence number check at the caller will * retry it again when a d_move() does happen. So any garbage in the buffer * due to mismatched pointer and length will be discarded. * * Load acquire is needed to make sure that we see the new name data even * if we might get the length wrong. */ static bool prepend_name(struct prepend_buffer *p, const struct qstr *name) { const char *dname = smp_load_acquire(&name->name); /* ^^^ */ u32 dlen = READ_ONCE(name->len); return prepend(p, dname, dlen) && prepend_char(p, '/'); } static int __prepend_path(const struct dentry *dentry, const struct mount *mnt, const struct path *root, struct prepend_buffer *p) { while (dentry != root->dentry || &mnt->mnt != root->mnt) { const struct dentry *parent = READ_ONCE(dentry->d_parent); if (dentry == mnt->mnt.mnt_root) { struct mount *m = READ_ONCE(mnt->mnt_parent); struct mnt_namespace *mnt_ns; if (likely(mnt != m)) { dentry = READ_ONCE(mnt->mnt_mountpoint); mnt = m; continue; } /* Global root */ mnt_ns = READ_ONCE(mnt->mnt_ns); /* open-coded is_mounted() to use local mnt_ns */ if (!IS_ERR_OR_NULL(mnt_ns) && !is_anon_ns(mnt_ns)) return 1; // absolute root else return 2; // detached or not attached yet } if (unlikely(dentry == parent)) /* Escaped? */ return 3; prefetch(parent); if (!prepend_name(p, &dentry->d_name)) break; dentry = parent; } return 0; } /** * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @p: prepend buffer which contains buffer pointer and allocated length * * The function will first try to write out the pathname without taking any * lock other than the RCU read lock to make sure that dentries won't go away. * It only checks the sequence number of the global rename_lock as any change * in the dentry's d_seq will be preceded by changes in the rename_lock * sequence number. If the sequence number had been changed, it will restart * the whole pathname back-tracing sequence again by taking the rename_lock. * In this case, there is no need to take the RCU read lock as the recursive * parent pointer references will keep the dentry chain alive as long as no * rename operation is performed. 
*/ static int prepend_path(const struct path *path, const struct path *root, struct prepend_buffer *p) { unsigned seq, m_seq = 0; struct prepend_buffer b; int error; rcu_read_lock(); restart_mnt: read_seqbegin_or_lock(&mount_lock, &m_seq); seq = 0; rcu_read_lock(); restart: b = *p; read_seqbegin_or_lock(&rename_lock, &seq); error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (!(m_seq & 1)) rcu_read_unlock(); if (need_seqretry(&mount_lock, m_seq)) { m_seq = 1; goto restart_mnt; } done_seqretry(&mount_lock, m_seq); if (unlikely(error == 3)) b = *p; if (b.len == p->len) prepend_char(&b, '/'); *p = b; return error; } /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. * * Returns a pointer into the buffer or an error code if the * path was too long. * * "buflen" should be positive. * * If the path is not reachable from the supplied root, return %NULL. */ char *__d_path(const struct path *path, const struct path *root, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); if (unlikely(prepend_path(path, root, &b) > 0)) return NULL; return extract_string(&b); } char *d_absolute_path(const struct path *path, char *buf, int buflen) { struct path root = {}; DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); if (unlikely(prepend_path(path, &root, &b) > 1)) return ERR_PTR(-EINVAL); return extract_string(&b); } static void get_fs_root_rcu(struct fs_struct *fs, struct path *root) { unsigned seq; do { seq = read_seqbegin(&fs->seq); *root = fs->root; } while (read_seqretry(&fs->seq, seq)); } /** * d_path - return the path of a dentry * @path: path to report * @buf: buffer to return value in * @buflen: buffer length * * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * * Returns a pointer into the buffer or an error code if the path was * too long. Note: Callers should use the returned pointer, not the passed * in buffer, to use the name! The implementation often starts at an offset * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ char *d_path(const struct path *path, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); struct path root; /* * We have various synthetic filesystems that never get mounted. On * these filesystems dentries are never used for lookup purposes, and * thus don't need to be hashed. They also don't need a name until a * user wants to identify the object in /proc/pid/fd/. The little hack * below allows us to generate a name for these objects on demand: * * Some pseudo inodes are mountable. When they are mounted * path->dentry == path->mnt->mnt_root. In that case don't call d_dname * and instead have d_path return the mounted path. 
*/ if (path->dentry->d_op && path->dentry->d_op->d_dname && (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root)) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); rcu_read_lock(); get_fs_root_rcu(current->fs, &root); if (unlikely(d_unlinked(path->dentry))) prepend(&b, " (deleted)", 11); else prepend_char(&b, 0); prepend_path(path, &root, &b); rcu_read_unlock(); return extract_string(&b); } EXPORT_SYMBOL(d_path); /* * Helper function for dentry_operations.d_dname() members */ char *dynamic_dname(char *buffer, int buflen, const char *fmt, ...) { va_list args; char temp[64]; int sz; va_start(args, fmt); sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1; va_end(args); if (sz > sizeof(temp) || sz > buflen) return ERR_PTR(-ENAMETOOLONG); buffer += buflen - sz; return memcpy(buffer, temp, sz); } char *simple_dname(struct dentry *dentry, char *buffer, int buflen) { DECLARE_BUFFER(b, buffer, buflen); /* these dentries are never renamed, so d_lock is not needed */ prepend(&b, " (deleted)", 11); prepend(&b, dentry->d_name.name, dentry->d_name.len); prepend_char(&b, '/'); return extract_string(&b); } /* * Write full pathname from the root of the filesystem into the buffer. */ static char *__dentry_path(const struct dentry *d, struct prepend_buffer *p) { const struct dentry *dentry; struct prepend_buffer b; int seq = 0; rcu_read_lock(); restart: dentry = d; b = *p; read_seqbegin_or_lock(&rename_lock, &seq); while (!IS_ROOT(dentry)) { const struct dentry *parent = dentry->d_parent; prefetch(parent); if (!prepend_name(&b, &dentry->d_name)) break; dentry = parent; } if (!(seq & 1)) rcu_read_unlock(); if (need_seqretry(&rename_lock, seq)) { seq = 1; goto restart; } done_seqretry(&rename_lock, seq); if (b.len == p->len) prepend_char(&b, '/'); return extract_string(&b); } char *dentry_path_raw(const struct dentry *dentry, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); prepend_char(&b, 0); return __dentry_path(dentry, &b); } EXPORT_SYMBOL(dentry_path_raw); char *dentry_path(const struct dentry *dentry, char *buf, int buflen) { DECLARE_BUFFER(b, buf, buflen); if (unlikely(d_unlinked(dentry))) prepend(&b, "//deleted", 10); else prepend_char(&b, 0); return __dentry_path(dentry, &b); } static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root, struct path *pwd) { unsigned seq; do { seq = read_seqbegin(&fs->seq); *root = fs->root; *pwd = fs->pwd; } while (read_seqretry(&fs->seq, seq)); } /* * NOTE! The user-level library version returns a * character pointer. The kernel system call just * returns the length of the buffer filled (which * includes the ending '\0' character), or a negative * error value. 
So libc would do something like * * char *getcwd(char * buf, size_t size) * { * int retval; * * retval = sys_getcwd(buf, size); * if (retval >= 0) * return buf; * errno = -retval; * return NULL; * } */ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) { int error; struct path pwd, root; char *page = __getname(); if (!page) return -ENOMEM; rcu_read_lock(); get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); if (unlikely(d_unlinked(pwd.dentry))) { rcu_read_unlock(); error = -ENOENT; } else { unsigned len; DECLARE_BUFFER(b, page, PATH_MAX); prepend_char(&b, 0); if (unlikely(prepend_path(&pwd, &root, &b) > 0)) prepend(&b, "(unreachable)", 13); rcu_read_unlock(); len = PATH_MAX - b.len; if (unlikely(len > PATH_MAX)) error = -ENAMETOOLONG; else if (unlikely(len > size)) error = -ERANGE; else if (copy_to_user(buf, b.buf, len)) error = -EFAULT; else error = len; } __putname(page); return error; } |
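/*
 * Illustrative userspace check of the raw getcwd() semantics described in the
 * NOTE above: the system call returns the number of bytes written to the
 * buffer (including the trailing NUL), while the libc wrapper converts that
 * into a pointer/errno interface. This is a stand-alone sketch for
 * experimentation, not part of the kernel sources.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <limits.h>

int main(void)
{
	char buf[PATH_MAX];
	long len = syscall(SYS_getcwd, buf, sizeof(buf));

	if (len < 0) {		/* the syscall(2) wrapper returns -1 and sets errno */
		perror("getcwd");
		return 1;
	}
	/* len counts the terminating '\0', so strlen(buf) == len - 1 */
	printf("cwd = %s (len=%ld, strlen=%zu)\n", buf, len, strlen(buf));
	return 0;
}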
// SPDX-License-Identifier: GPL-2.0 #include <linux/hugetlb.h> #include <asm-generic/tlb.h> #include <asm/pgalloc.h> #include "internal.h" bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, struct zap_details *details) { return details && details->reclaim_pt && (end - start >= PMD_SIZE); } bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval) { spinlock_t *pml = pmd_lockptr(mm, pmd); if (!spin_trylock(pml)) return false; *pmdval = pmdp_get_lockless(pmd); pmd_clear(pmd); spin_unlock(pml); return true; } void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb, pmd_t pmdval) { pte_free_tlb(tlb, pmd_pgtable(pmdval), addr); mm_dec_nr_ptes(mm); } void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, struct mmu_gather *tlb) { pmd_t pmdval; spinlock_t *pml, *ptl = NULL; pte_t *start_pte, *pte; int i; pml = pmd_lock(mm, pmd); start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl); if (!start_pte) goto out_ptl; if (ptl != pml) spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); /* Check if it is empty PTE page */ for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) { if (!pte_none(ptep_get(pte))) goto out_ptl; } pte_unmap(start_pte); pmd_clear(pmd); if (ptl != pml) spin_unlock(ptl); spin_unlock(pml); free_pte(mm, addr, tlb, pmdval); return; out_ptl: if (start_pte) pte_unmap_unlock(start_pte, ptl); if (ptl != pml) spin_unlock(pml); }
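/*
 * Minimal userspace analogue of the scan in try_to_free_pte() above: walk
 * every slot of a table and free the table only when all slots are empty,
 * bailing out at the first occupied entry. The names (slot_table,
 * try_free_table) are invented for illustration; the kernel version
 * additionally has to take the pmd/pte locks and clear the pmd first.
 */
#include <stdbool.h>
#include <stdlib.h>

#define SLOTS_PER_TABLE 512	/* stand-in for PTRS_PER_PTE */

struct slot_table {
	unsigned long slot[SLOTS_PER_TABLE];	/* 0 means empty ("pte_none") */
};

static bool try_free_table(struct slot_table **tablep)
{
	struct slot_table *t = *tablep;
	int i;

	for (i = 0; i < SLOTS_PER_TABLE; i++) {
		if (t->slot[i])		/* first live entry: keep the table */
			return false;
	}
	*tablep = NULL;			/* analogue of pmd_clear() */
	free(t);			/* analogue of free_pte() */
	return true;
}

int main(void)
{
	struct slot_table *t = calloc(1, sizeof(*t));

	return t && try_free_table(&t) ? 0 : 1;	/* an empty table gets freed */
}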
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FUTEX_H #define _LINUX_FUTEX_H #include <linux/sched.h> #include <linux/ktime.h> #include <linux/mm_types.h> #include <uapi/linux/futex.h> struct inode; struct task_struct; /* * Futexes are matched on equal values of this key. * The key type depends on whether it's a shared or private mapping. * Don't rearrange members without looking at hash_futex(). * * offset is aligned to a multiple of sizeof(u32) (== 4) by definition. * We use the two low order bits of offset to tell what is the kind of key : * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE) * (no reference on an inode or mm) * 01 : Shared futex (PTHREAD_PROCESS_SHARED) * mapped on a file (reference on the underlying inode) * 10 : Shared futex (PTHREAD_PROCESS_SHARED) * (but private mapping on an mm, and reference taken on it) */ #define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */ #define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */ union futex_key { struct { u64 i_seq; unsigned long pgoff; unsigned int offset; /* unsigned int node; */ } shared; struct { union { struct mm_struct *mm; u64 __tmp; }; unsigned long address; unsigned int offset; /* unsigned int node; */ } private; struct { u64 ptr; unsigned long word; unsigned int offset; unsigned int node; /* NOT hashed! */ } both; }; #define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } } #ifdef CONFIG_FUTEX enum { FUTEX_STATE_OK, FUTEX_STATE_EXITING, FUTEX_STATE_DEAD, }; static inline void futex_init_task(struct task_struct *tsk) { tsk->robust_list = NULL; #ifdef CONFIG_COMPAT tsk->compat_robust_list = NULL; #endif INIT_LIST_HEAD(&tsk->pi_state_list); tsk->pi_state_cache = NULL; tsk->futex_state = FUTEX_STATE_OK; mutex_init(&tsk->futex_exit_mutex); } void futex_exit_recursive(struct task_struct *tsk); void futex_exit_release(struct task_struct *tsk); void futex_exec_release(struct task_struct *tsk); long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3); int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4); #ifdef CONFIG_FUTEX_PRIVATE_HASH int futex_hash_allocate_default(void); void futex_hash_free(struct mm_struct *mm); int futex_mm_init(struct mm_struct *mm); #else /* !CONFIG_FUTEX_PRIVATE_HASH */ static inline int futex_hash_allocate_default(void) { return 0; } static inline int futex_hash_free(struct mm_struct *mm) { return 0; } static inline int futex_mm_init(struct mm_struct *mm) { return 0; } #endif /* CONFIG_FUTEX_PRIVATE_HASH */ #else /* !CONFIG_FUTEX */ static inline void futex_init_task(struct task_struct *tsk) { } static inline void futex_exit_recursive(struct task_struct *tsk) { } static inline void futex_exit_release(struct task_struct *tsk) { } static inline void futex_exec_release(struct task_struct *tsk) { } static inline long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { return -EINVAL; } static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4) { return -EINVAL; } static inline int
futex_hash_allocate_default(void) { return 0; } static inline int futex_hash_free(struct mm_struct *mm) { return 0; } static inline int futex_mm_init(struct mm_struct *mm) { return 0; } #endif #endif
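/*
 * Small stand-alone illustration of the key-type encoding described in the
 * futex_key comment above: because the in-page offset of a 4-byte-aligned
 * futex word is a multiple of 4, its two low bits are free and can carry the
 * FUT_OFF_INODE / FUT_OFF_MMSHARED markers. The helper names below are
 * invented for the example; only the bit values mirror the header.
 */
#include <assert.h>
#include <stdio.h>

#define EX_OFF_INODE	1	/* bit 0: key references an inode */
#define EX_OFF_MMSHARED	2	/* bit 1: key references an mm */

static unsigned int pack_offset(unsigned int offset, unsigned int kind)
{
	assert((offset & 3) == 0);	/* u32-aligned futex word */
	return offset | kind;
}

int main(void)
{
	unsigned int key = pack_offset(0x128, EX_OFF_INODE);

	printf("offset=%#x inode=%d mmshared=%d\n",
	       key & ~3u, !!(key & EX_OFF_INODE), !!(key & EX_OFF_MMSHARED));
	return 0;
}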
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C), 2008-2021, OPPO Mobile Comm Corp., Ltd. * https://www.oppo.com/ */ #include <linux/sysfs.h> #include <linux/kobject.h> #include "internal.h" #include "compress.h" enum { attr_feature, attr_drop_caches, attr_pointer_ui, attr_pointer_bool, attr_accel, }; enum { struct_erofs_sb_info, struct_erofs_mount_opts, }; struct erofs_attr { struct attribute attr; short attr_id; int struct_type, offset; }; #define EROFS_ATTR(_name, _mode, _id) \ static struct erofs_attr erofs_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_##_id, \ } #define EROFS_ATTR_FUNC(_name, _mode) EROFS_ATTR(_name, _mode, _name) #define EROFS_ATTR_FEATURE(_name) EROFS_ATTR(_name, 0444, feature) #define EROFS_ATTR_OFFSET(_name, _mode, _id, _struct) \ static struct erofs_attr erofs_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_##_id, \ .struct_type = struct_##_struct, \ .offset = offsetof(struct _struct, _name),\ } #define EROFS_ATTR_RW(_name, _id, _struct) \ EROFS_ATTR_OFFSET(_name, 0644, _id, _struct) #define EROFS_RO_ATTR(_name, _id, _struct) \ EROFS_ATTR_OFFSET(_name, 0444, _id, _struct) #define EROFS_ATTR_RW_UI(_name, _struct) \ EROFS_ATTR_RW(_name, pointer_ui, _struct) #define EROFS_ATTR_RW_BOOL(_name, _struct) \ EROFS_ATTR_RW(_name, pointer_bool, _struct) #define ATTR_LIST(name) (&erofs_attr_##name.attr) #ifdef CONFIG_EROFS_FS_ZIP EROFS_ATTR_RW_UI(sync_decompress, erofs_mount_opts); EROFS_ATTR_FUNC(drop_caches, 0200); #endif #ifdef CONFIG_EROFS_FS_ZIP_ACCEL EROFS_ATTR_FUNC(accel, 0644); #endif EROFS_ATTR_RW_UI(dir_ra_bytes, erofs_sb_info); static struct attribute *erofs_sb_attrs[] = { #ifdef CONFIG_EROFS_FS_ZIP ATTR_LIST(sync_decompress), ATTR_LIST(drop_caches), #endif ATTR_LIST(dir_ra_bytes), NULL, }; ATTRIBUTE_GROUPS(erofs_sb); static struct attribute *erofs_attrs[] = { #ifdef CONFIG_EROFS_FS_ZIP_ACCEL ATTR_LIST(accel), #endif NULL, }; ATTRIBUTE_GROUPS(erofs); /* Features this copy of erofs supports */ EROFS_ATTR_FEATURE(zero_padding); EROFS_ATTR_FEATURE(compr_cfgs); EROFS_ATTR_FEATURE(big_pcluster); EROFS_ATTR_FEATURE(chunked_file); EROFS_ATTR_FEATURE(device_table); EROFS_ATTR_FEATURE(compr_head2); EROFS_ATTR_FEATURE(sb_chksum); EROFS_ATTR_FEATURE(ztailpacking); EROFS_ATTR_FEATURE(fragments); EROFS_ATTR_FEATURE(dedupe); EROFS_ATTR_FEATURE(48bit);
EROFS_ATTR_FEATURE(metabox); static struct attribute *erofs_feat_attrs[] = { ATTR_LIST(zero_padding), ATTR_LIST(compr_cfgs), ATTR_LIST(big_pcluster), ATTR_LIST(chunked_file), ATTR_LIST(device_table), ATTR_LIST(compr_head2), ATTR_LIST(sb_chksum), ATTR_LIST(ztailpacking), ATTR_LIST(fragments), ATTR_LIST(dedupe), ATTR_LIST(48bit), ATTR_LIST(metabox), NULL, }; ATTRIBUTE_GROUPS(erofs_feat); static unsigned char *__struct_ptr(struct erofs_sb_info *sbi, int struct_type, int offset) { if (struct_type == struct_erofs_sb_info) return (unsigned char *)sbi + offset; if (struct_type == struct_erofs_mount_opts) return (unsigned char *)&sbi->opt + offset; return NULL; } static ssize_t erofs_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info, s_kobj); struct erofs_attr *a = container_of(attr, struct erofs_attr, attr); unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset); switch (a->attr_id) { case attr_feature: return sysfs_emit(buf, "supported\n"); case attr_pointer_ui: if (!ptr) return 0; return sysfs_emit(buf, "%u\n", *(unsigned int *)ptr); case attr_pointer_bool: if (!ptr) return 0; return sysfs_emit(buf, "%d\n", *(bool *)ptr); case attr_accel: return z_erofs_crypto_show_engines(buf, PAGE_SIZE, '\n'); } return 0; } static ssize_t erofs_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info, s_kobj); struct erofs_attr *a = container_of(attr, struct erofs_attr, attr); unsigned char *ptr = __struct_ptr(sbi, a->struct_type, a->offset); unsigned long t; int ret; switch (a->attr_id) { case attr_pointer_ui: if (!ptr) return 0; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (t != (unsigned int)t) return -ERANGE; #ifdef CONFIG_EROFS_FS_ZIP if (!strcmp(a->attr.name, "sync_decompress") && (t > EROFS_SYNC_DECOMPRESS_FORCE_OFF)) return -EINVAL; #endif *(unsigned int *)ptr = t; return len; case attr_pointer_bool: if (!ptr) return 0; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (t != 0 && t != 1) return -EINVAL; *(bool *)ptr = !!t; return len; #ifdef CONFIG_EROFS_FS_ZIP case attr_drop_caches: ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (t < 1 || t > 3) return -EINVAL; if (t & 2) z_erofs_shrink_scan(sbi, ~0UL); if (t & 1) invalidate_mapping_pages(MNGD_MAPPING(sbi), 0, -1); return len; #endif #ifdef CONFIG_EROFS_FS_ZIP_ACCEL case attr_accel: buf = skip_spaces(buf); z_erofs_crypto_disable_all_engines(); while (*buf) { t = strcspn(buf, "\n"); ret = z_erofs_crypto_enable_engine(buf, t); if (ret < 0) return ret; buf += buf[t] != '\0' ? 
t + 1 : t; } return len; #endif } return 0; } static void erofs_sb_release(struct kobject *kobj) { struct erofs_sb_info *sbi = container_of(kobj, struct erofs_sb_info, s_kobj); complete(&sbi->s_kobj_unregister); } static const struct sysfs_ops erofs_attr_ops = { .show = erofs_attr_show, .store = erofs_attr_store, }; static const struct kobj_type erofs_sb_ktype = { .default_groups = erofs_sb_groups, .sysfs_ops = &erofs_attr_ops, .release = erofs_sb_release, }; static const struct kobj_type erofs_ktype = { .default_groups = erofs_groups, .sysfs_ops = &erofs_attr_ops, }; static struct kset erofs_root = { .kobj = {.ktype = &erofs_ktype}, }; static const struct kobj_type erofs_feat_ktype = { .default_groups = erofs_feat_groups, .sysfs_ops = &erofs_attr_ops, }; static struct kobject erofs_feat = { .kset = &erofs_root, }; int erofs_register_sysfs(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); int err; sbi->s_kobj.kset = &erofs_root; init_completion(&sbi->s_kobj_unregister); err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", sb->s_sysfs_name); if (err) { kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); } return err; } void erofs_unregister_sysfs(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); if (sbi->s_kobj.state_in_sysfs) { kobject_del(&sbi->s_kobj); kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); } } void erofs_exit_sysfs(void) { kobject_put(&erofs_feat); kset_unregister(&erofs_root); } int __init erofs_init_sysfs(void) { int ret; kobject_set_name(&erofs_root.kobj, "erofs"); erofs_root.kobj.parent = fs_kobj; ret = kset_register(&erofs_root); if (!ret) { ret = kobject_init_and_add(&erofs_feat, &erofs_feat_ktype, NULL, "features"); if (!ret) return 0; erofs_exit_sysfs(); } return ret; } |
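/*
 * Stand-alone sketch of the dispatch technique used by EROFS_ATTR_OFFSET()
 * and __struct_ptr() above: each attribute records which structure it lives
 * in and its offsetof() within that structure, so a single show() routine can
 * locate the backing field at runtime. The structure and field names here are
 * invented for the example; only the offsetof() dispatch mirrors the driver.
 */
#include <stddef.h>
#include <stdio.h>

struct opts { unsigned int sync_mode; };
struct info { struct opts opt; unsigned int ra_bytes; };

enum { in_info, in_opts };

struct attr { const char *name; int where; size_t offset; };

#define ATTR(_struct, _where, _field) \
	{ #_field, _where, offsetof(struct _struct, _field) }

static const struct attr attrs[] = {
	ATTR(opts, in_opts, sync_mode),
	ATTR(info, in_info, ra_bytes),
};

static unsigned int *field_ptr(struct info *sbi, const struct attr *a)
{
	/* pick the base object the attribute belongs to, then add its offset */
	char *base = (a->where == in_opts) ? (char *)&sbi->opt : (char *)sbi;

	return (unsigned int *)(base + a->offset);
}

int main(void)
{
	struct info sbi = { .opt = { .sync_mode = 1 }, .ra_bytes = 16384 };
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		printf("%s = %u\n", attrs[i].name, *field_ptr(&sbi, &attrs[i]));
	return 0;
}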
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved.
*/ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_sb.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_iwalk.h" #include "xfs_quota.h" #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_trans.h" #include "xfs_trans_space.h" #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_error.h" #include "xfs_ag.h" #include "xfs_ialloc.h" #include "xfs_log_priv.h" #include "xfs_health.h" #include "xfs_da_format.h" #include "xfs_metafile.h" #include "xfs_rtgroup.h" /* * The global quota manager. There is only one of these for the entire * system, _not_ one per file system. XQM keeps track of the overall * quota functionality, including maintaining the freelist and hash * tables of dquots. */ STATIC int xfs_qm_init_quotainos(struct xfs_mount *mp); STATIC int xfs_qm_init_quotainfo(struct xfs_mount *mp); STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); /* * We use the batch lookup interface to iterate over the dquots as it * currently is the only interface into the radix tree code that allows * fuzzy lookups instead of exact matches. Holding the lock over multiple * operations is fine as all callers are used either during mount/umount * or quotaoff. */ #define XFS_DQ_LOOKUP_BATCH 32 STATIC int xfs_qm_dquot_walk( struct xfs_mount *mp, xfs_dqtype_t type, int (*execute)(struct xfs_dquot *dqp, void *data), void *data) { struct xfs_quotainfo *qi = mp->m_quotainfo; struct radix_tree_root *tree = xfs_dquot_tree(qi, type); uint32_t next_index; int last_error = 0; int skipped; int nr_found; restart: skipped = 0; next_index = 0; nr_found = 0; while (1) { struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH]; int error; int i; mutex_lock(&qi->qi_tree_lock); nr_found = radix_tree_gang_lookup(tree, (void **)batch, next_index, XFS_DQ_LOOKUP_BATCH); if (!nr_found) { mutex_unlock(&qi->qi_tree_lock); break; } for (i = 0; i < nr_found; i++) { struct xfs_dquot *dqp = batch[i]; next_index = dqp->q_id + 1; error = execute(batch[i], data); if (error == -EAGAIN) { skipped++; continue; } if (error && last_error != -EFSCORRUPTED) last_error = error; } mutex_unlock(&qi->qi_tree_lock); /* bail out if the filesystem is corrupted. */ if (last_error == -EFSCORRUPTED) { skipped = 0; break; } /* we're done if id overflows back to zero */ if (!next_index) break; } if (skipped) { delay(1); goto restart; } return last_error; } /* * Purge a dquot from all tracking data structures and free it. */ STATIC int xfs_qm_dqpurge( struct xfs_dquot *dqp, void *data) { struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo; int error = -EAGAIN; xfs_dqlock(dqp); if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0) goto out_unlock; dqp->q_flags |= XFS_DQFLAG_FREEING; xfs_qm_dqunpin_wait(dqp); xfs_dqflock(dqp); /* * If we are turning this type of quotas off, we don't care * about the dirty metadata sitting in this dquot. OTOH, if * we're unmounting, we do care, so we flush it and wait. */ if (XFS_DQ_IS_DIRTY(dqp)) { struct xfs_buf *bp = NULL; /* * We don't care about getting disk errors here. We need * to purge this dquot anyway, so we go ahead regardless. */ error = xfs_dquot_use_attached_buf(dqp, &bp); if (error == -EAGAIN) { xfs_dqfunlock(dqp); dqp->q_flags &= ~XFS_DQFLAG_FREEING; goto out_unlock; } if (!bp) goto out_funlock; /* * dqflush completes dqflock on error, and the bwrite ioend * does it on success. 
*/ error = xfs_qm_dqflush(dqp, bp); if (!error) { error = xfs_bwrite(bp); xfs_buf_relse(bp); } xfs_dqflock(dqp); } xfs_dquot_detach_buf(dqp); out_funlock: ASSERT(atomic_read(&dqp->q_pincount) == 0); ASSERT(xlog_is_shutdown(dqp->q_logitem.qli_item.li_log) || !test_bit(XFS_LI_IN_AIL, &dqp->q_logitem.qli_item.li_flags)); xfs_dqfunlock(dqp); xfs_dqunlock(dqp); radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id); qi->qi_dquots--; /* * We move dquots to the freelist as soon as their reference count * hits zero, so it really should be on the freelist here. */ ASSERT(!list_empty(&dqp->q_lru)); list_lru_del_obj(&qi->qi_lru, &dqp->q_lru); XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused); xfs_qm_dqdestroy(dqp); return 0; out_unlock: xfs_dqunlock(dqp); return error; } /* * Purge the dquot cache. */ static void xfs_qm_dqpurge_all( struct xfs_mount *mp) { xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL); xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL); xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL); } /* * Just destroy the quotainfo structure. */ void xfs_qm_unmount( struct xfs_mount *mp) { if (mp->m_quotainfo) { xfs_qm_dqpurge_all(mp); xfs_qm_destroy_quotainfo(mp); } } static void xfs_qm_unmount_rt( struct xfs_mount *mp) { struct xfs_rtgroup *rtg = xfs_rtgroup_grab(mp, 0); if (!rtg) return; if (rtg_bitmap(rtg)) xfs_qm_dqdetach(rtg_bitmap(rtg)); if (rtg_summary(rtg)) xfs_qm_dqdetach(rtg_summary(rtg)); xfs_rtgroup_rele(rtg); } STATIC void xfs_qm_destroy_quotainos( struct xfs_quotainfo *qi) { if (qi->qi_uquotaip) { xfs_irele(qi->qi_uquotaip); qi->qi_uquotaip = NULL; /* paranoia */ } if (qi->qi_gquotaip) { xfs_irele(qi->qi_gquotaip); qi->qi_gquotaip = NULL; } if (qi->qi_pquotaip) { xfs_irele(qi->qi_pquotaip); qi->qi_pquotaip = NULL; } if (qi->qi_dirip) { xfs_irele(qi->qi_dirip); qi->qi_dirip = NULL; } } /* * Called from the vfsops layer. */ void xfs_qm_unmount_quotas( xfs_mount_t *mp) { /* * Release the dquots that root inode, et al might be holding, * before we flush quotas and blow away the quotainfo structure. */ ASSERT(mp->m_rootip); xfs_qm_dqdetach(mp->m_rootip); /* * For pre-RTG file systems, the RT inodes have quotas attached, * detach them now. */ if (!xfs_has_rtgroups(mp)) xfs_qm_unmount_rt(mp); /* * Release the quota inodes. */ if (mp->m_quotainfo) xfs_qm_destroy_quotainos(mp->m_quotainfo); } STATIC int xfs_qm_dqattach_one( struct xfs_inode *ip, xfs_dqtype_t type, bool doalloc, struct xfs_dquot **IO_idqpp) { struct xfs_dquot *dqp; int error; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); error = 0; /* * See if we already have it in the inode itself. IO_idqpp is &i_udquot * or &i_gdquot. This made the code look weird, but made the logic a lot * simpler. */ dqp = *IO_idqpp; if (dqp) { trace_xfs_dqattach_found(dqp); return 0; } /* * Find the dquot from somewhere. This bumps the reference count of * dquot and returns it locked. This can return ENOENT if dquot didn't * exist on disk and we didn't ask it to allocate; ESRCH if quotas got * turned off suddenly. */ error = xfs_qm_dqget_inode(ip, type, doalloc, &dqp); if (error) return error; trace_xfs_dqattach_get(dqp); /* * dqget may have dropped and re-acquired the ilock, but it guarantees * that the dquot returned is the one that should go in the inode. 
*/ *IO_idqpp = dqp; xfs_dqunlock(dqp); return 0; } static bool xfs_qm_need_dqattach( struct xfs_inode *ip) { struct xfs_mount *mp = ip->i_mount; if (!XFS_IS_QUOTA_ON(mp)) return false; if (!XFS_NOT_DQATTACHED(mp, ip)) return false; if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) return false; if (xfs_is_metadir_inode(ip)) return false; return true; } /* * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON * into account. * If @doalloc is true, the dquot(s) will be allocated if needed. * Inode may get unlocked and relocked in here, and the caller must deal with * the consequences. */ int xfs_qm_dqattach_locked( xfs_inode_t *ip, bool doalloc) { xfs_mount_t *mp = ip->i_mount; int error = 0; if (!xfs_qm_need_dqattach(ip)) return 0; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); ASSERT(!xfs_is_metadir_inode(ip)); if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) { error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_USER, doalloc, &ip->i_udquot); if (error) goto done; ASSERT(ip->i_udquot); } if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) { error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_GROUP, doalloc, &ip->i_gdquot); if (error) goto done; ASSERT(ip->i_gdquot); } if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) { error = xfs_qm_dqattach_one(ip, XFS_DQTYPE_PROJ, doalloc, &ip->i_pdquot); if (error) goto done; ASSERT(ip->i_pdquot); } done: /* * Don't worry about the dquots that we may have attached before any * error - they'll get detached later if it has not already been done. */ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); return error; } int xfs_qm_dqattach( struct xfs_inode *ip) { int error; if (!xfs_qm_need_dqattach(ip)) return 0; xfs_ilock(ip, XFS_ILOCK_EXCL); error = xfs_qm_dqattach_locked(ip, false); xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } /* * Release dquots (and their references) if any. * The inode should be locked EXCL except when this's called by * xfs_ireclaim. */ void xfs_qm_dqdetach( xfs_inode_t *ip) { if (xfs_is_metadir_inode(ip)) return; if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot)) return; trace_xfs_dquot_dqdetach(ip); ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino)); if (ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } if (ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } if (ip->i_pdquot) { xfs_qm_dqrele(ip->i_pdquot); ip->i_pdquot = NULL; } } struct xfs_qm_isolate { struct list_head buffers; struct list_head dispose; }; static enum lru_status xfs_qm_dquot_isolate( struct list_head *item, struct list_lru_one *lru, void *arg) __releases(&lru->lock) __acquires(&lru->lock) { struct xfs_dquot *dqp = container_of(item, struct xfs_dquot, q_lru); struct xfs_qm_isolate *isol = arg; enum lru_status ret = LRU_SKIP; if (!xfs_dqlock_nowait(dqp)) goto out_miss_busy; /* * If something else is freeing this dquot and hasn't yet removed it * from the LRU, leave it for the freeing task to complete the freeing * process rather than risk it being free from under us here. */ if (dqp->q_flags & XFS_DQFLAG_FREEING) goto out_miss_unlock; /* * If the dquot is pinned or dirty, rotate it to the end of the LRU to * give some time for it to be cleaned before we try to isolate it * again. */ ret = LRU_ROTATE; if (XFS_DQ_IS_DIRTY(dqp) || atomic_read(&dqp->q_pincount) > 0) { goto out_miss_unlock; } /* * This dquot has acquired a reference in the meantime remove it from * the freelist and try again. 
*/ if (dqp->q_nrefs) { xfs_dqunlock(dqp); XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants); trace_xfs_dqreclaim_want(dqp); list_lru_isolate(lru, &dqp->q_lru); XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused); return LRU_REMOVED; } /* * The dquot may still be under IO, in which case the flush lock will be * held. If we can't get the flush lock now, just skip over the dquot as * if it was dirty. */ if (!xfs_dqflock_nowait(dqp)) goto out_miss_unlock; ASSERT(!XFS_DQ_IS_DIRTY(dqp)); xfs_dquot_detach_buf(dqp); xfs_dqfunlock(dqp); /* * Prevent lookups now that we are past the point of no return. */ dqp->q_flags |= XFS_DQFLAG_FREEING; xfs_dqunlock(dqp); ASSERT(dqp->q_nrefs == 0); list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose); XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused); trace_xfs_dqreclaim_done(dqp); XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims); return LRU_REMOVED; out_miss_unlock: xfs_dqunlock(dqp); out_miss_busy: trace_xfs_dqreclaim_busy(dqp); XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses); return ret; } static unsigned long xfs_qm_shrink_scan( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_quotainfo *qi = shrink->private_data; struct xfs_qm_isolate isol; unsigned long freed; int error; if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) return 0; INIT_LIST_HEAD(&isol.buffers); INIT_LIST_HEAD(&isol.dispose); freed = list_lru_shrink_walk(&qi->qi_lru, sc, xfs_qm_dquot_isolate, &isol); error = xfs_buf_delwri_submit(&isol.buffers); if (error) xfs_warn(NULL, "%s: dquot reclaim failed", __func__); while (!list_empty(&isol.dispose)) { struct xfs_dquot *dqp; dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru); list_del_init(&dqp->q_lru); xfs_qm_dqfree_one(dqp); } return freed; } static unsigned long xfs_qm_shrink_count( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_quotainfo *qi = shrink->private_data; return list_lru_shrink_count(&qi->qi_lru, sc); } STATIC void xfs_qm_set_defquota( struct xfs_mount *mp, xfs_dqtype_t type, struct xfs_quotainfo *qinf) { struct xfs_dquot *dqp; struct xfs_def_quota *defq; int error; error = xfs_qm_dqget_uncached(mp, 0, type, &dqp); if (error) return; defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp)); /* * Timers and warnings have been already set, let's just set the * default limits for this quota type */ defq->blk.hard = dqp->q_blk.hardlimit; defq->blk.soft = dqp->q_blk.softlimit; defq->ino.hard = dqp->q_ino.hardlimit; defq->ino.soft = dqp->q_ino.softlimit; defq->rtb.hard = dqp->q_rtb.hardlimit; defq->rtb.soft = dqp->q_rtb.softlimit; xfs_qm_dqdestroy(dqp); } /* Initialize quota time limits from the root dquot. */ static void xfs_qm_init_timelimits( struct xfs_mount *mp, xfs_dqtype_t type) { struct xfs_quotainfo *qinf = mp->m_quotainfo; struct xfs_def_quota *defq; struct xfs_dquot *dqp; int error; defq = xfs_get_defquota(qinf, type); defq->blk.time = XFS_QM_BTIMELIMIT; defq->ino.time = XFS_QM_ITIMELIMIT; defq->rtb.time = XFS_QM_RTBTIMELIMIT; /* * We try to get the limits from the superuser's limits fields. * This is quite hacky, but it is standard quota practice. * * Since we may not have done a quotacheck by this point, just read * the dquot without attaching it to any hashtables or lists. */ error = xfs_qm_dqget_uncached(mp, 0, type, &dqp); if (error) return; /* * The warnings and timers set the grace period given to * a user or group before he or she can not perform any * more writing. If it is zero, a default is used. 
*/ if (dqp->q_blk.timer) defq->blk.time = dqp->q_blk.timer; if (dqp->q_ino.timer) defq->ino.time = dqp->q_ino.timer; if (dqp->q_rtb.timer) defq->rtb.time = dqp->q_rtb.timer; xfs_qm_dqdestroy(dqp); } static int xfs_qm_load_metadir_qinos( struct xfs_mount *mp, struct xfs_quotainfo *qi) { struct xfs_trans *tp; int error; tp = xfs_trans_alloc_empty(mp); error = xfs_dqinode_load_parent(tp, &qi->qi_dirip); if (error == -ENOENT) { /* no quota dir directory, but we'll create one later */ error = 0; goto out_trans; } if (error) goto out_trans; if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_USER, &qi->qi_uquotaip); if (error && error != -ENOENT) goto out_trans; } if (XFS_IS_GQUOTA_ON(mp)) { error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_GROUP, &qi->qi_gquotaip); if (error && error != -ENOENT) goto out_trans; } if (XFS_IS_PQUOTA_ON(mp)) { error = xfs_dqinode_load(tp, qi->qi_dirip, XFS_DQTYPE_PROJ, &qi->qi_pquotaip); if (error && error != -ENOENT) goto out_trans; } error = 0; out_trans: xfs_trans_cancel(tp); return error; } /* Create quota inodes in the metadata directory tree. */ STATIC int xfs_qm_create_metadir_qinos( struct xfs_mount *mp, struct xfs_quotainfo *qi) { int error; if (!qi->qi_dirip) { error = xfs_dqinode_mkdir_parent(mp, &qi->qi_dirip); if (error && error != -EEXIST) return error; /* * If the /quotas dirent points to an inode that isn't * loadable, qi_dirip will be NULL but mkdir_parent will return * -EEXIST. In this case the metadir is corrupt, so bail out. */ if (XFS_IS_CORRUPT(mp, qi->qi_dirip == NULL)) return -EFSCORRUPTED; } if (XFS_IS_UQUOTA_ON(mp) && !qi->qi_uquotaip) { error = xfs_dqinode_metadir_create(qi->qi_dirip, XFS_DQTYPE_USER, &qi->qi_uquotaip); if (error) return error; } if (XFS_IS_GQUOTA_ON(mp) && !qi->qi_gquotaip) { error = xfs_dqinode_metadir_create(qi->qi_dirip, XFS_DQTYPE_GROUP, &qi->qi_gquotaip); if (error) return error; } if (XFS_IS_PQUOTA_ON(mp) && !qi->qi_pquotaip) { error = xfs_dqinode_metadir_create(qi->qi_dirip, XFS_DQTYPE_PROJ, &qi->qi_pquotaip); if (error) return error; } return 0; } /* * Add QUOTABIT to sb_versionnum and initialize qflags in preparation for * creating quota files on a metadir filesystem. */ STATIC int xfs_qm_prep_metadir_sb( struct xfs_mount *mp) { struct xfs_trans *tp; int error; error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0, 0, &tp); if (error) return error; spin_lock(&mp->m_sb_lock); xfs_add_quota(mp); /* qflags will get updated fully _after_ quotacheck */ mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT; spin_unlock(&mp->m_sb_lock); xfs_log_sb(tp); return xfs_trans_commit(tp); } /* * Load existing quota inodes or create them. Since this is a V5 filesystem, * we don't have to deal with the grp/prjquota switcheroo thing from V4. 
*/ STATIC int xfs_qm_init_metadir_qinos( struct xfs_mount *mp) { struct xfs_quotainfo *qi = mp->m_quotainfo; int error; if (!xfs_has_quota(mp)) { error = xfs_qm_prep_metadir_sb(mp); if (error) return error; } error = xfs_qm_load_metadir_qinos(mp, qi); if (error) goto out_err; error = xfs_qm_create_metadir_qinos(mp, qi); if (error) goto out_err; /* The only user of the quota dir inode is online fsck */ #if !IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB) xfs_irele(qi->qi_dirip); qi->qi_dirip = NULL; #endif return 0; out_err: xfs_qm_destroy_quotainos(mp->m_quotainfo); return error; } /* * This initializes all the quota information that's kept in the * mount structure */ STATIC int xfs_qm_init_quotainfo( struct xfs_mount *mp) { struct xfs_quotainfo *qinf; int error; ASSERT(XFS_IS_QUOTA_ON(mp)); qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo), GFP_KERNEL | __GFP_NOFAIL); error = list_lru_init(&qinf->qi_lru); if (error) goto out_free_qinf; /* * See if quotainodes are setup, and if not, allocate them, * and change the superblock accordingly. */ if (xfs_has_metadir(mp)) error = xfs_qm_init_metadir_qinos(mp); else error = xfs_qm_init_quotainos(mp); if (error) goto out_free_lru; INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_KERNEL); INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_KERNEL); INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_KERNEL); mutex_init(&qinf->qi_tree_lock); /* mutex used to serialize quotaoffs */ mutex_init(&qinf->qi_quotaofflock); /* Precalc some constants */ qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen); if (xfs_has_bigtime(mp)) { qinf->qi_expiry_min = xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MIN); qinf->qi_expiry_max = xfs_dq_bigtime_to_unix(XFS_DQ_BIGTIME_EXPIRY_MAX); } else { qinf->qi_expiry_min = XFS_DQ_LEGACY_EXPIRY_MIN; qinf->qi_expiry_max = XFS_DQ_LEGACY_EXPIRY_MAX; } trace_xfs_quota_expiry_range(mp, qinf->qi_expiry_min, qinf->qi_expiry_max); mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER); xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP); xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ); if (XFS_IS_UQUOTA_ON(mp)) xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf); if (XFS_IS_GQUOTA_ON(mp)) xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf); if (XFS_IS_PQUOTA_ON(mp)) xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf); qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s", mp->m_super->s_id); if (!qinf->qi_shrinker) { error = -ENOMEM; goto out_free_inos; } qinf->qi_shrinker->count_objects = xfs_qm_shrink_count; qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan; qinf->qi_shrinker->private_data = qinf; shrinker_register(qinf->qi_shrinker); xfs_hooks_init(&qinf->qi_mod_ino_dqtrx_hooks); xfs_hooks_init(&qinf->qi_apply_dqtrx_hooks); return 0; out_free_inos: mutex_destroy(&qinf->qi_quotaofflock); mutex_destroy(&qinf->qi_tree_lock); xfs_qm_destroy_quotainos(qinf); out_free_lru: list_lru_destroy(&qinf->qi_lru); out_free_qinf: kfree(qinf); mp->m_quotainfo = NULL; return error; } /* * Gets called when unmounting a filesystem or when all quotas get * turned off. * This purges the quota inodes, destroys locks and frees itself. 
*/ void xfs_qm_destroy_quotainfo( struct xfs_mount *mp) { struct xfs_quotainfo *qi; qi = mp->m_quotainfo; ASSERT(qi != NULL); shrinker_free(qi->qi_shrinker); list_lru_destroy(&qi->qi_lru); xfs_qm_destroy_quotainos(qi); mutex_destroy(&qi->qi_tree_lock); mutex_destroy(&qi->qi_quotaofflock); kfree(qi); mp->m_quotainfo = NULL; } static inline enum xfs_metafile_type xfs_qm_metafile_type( unsigned int flags) { if (flags & XFS_QMOPT_UQUOTA) return XFS_METAFILE_USRQUOTA; else if (flags & XFS_QMOPT_GQUOTA) return XFS_METAFILE_GRPQUOTA; return XFS_METAFILE_PRJQUOTA; } /* * Create an inode and return with a reference already taken, but unlocked * This is how we create quota inodes */ STATIC int xfs_qm_qino_alloc( struct xfs_mount *mp, struct xfs_inode **ipp, unsigned int flags) { struct xfs_trans *tp; enum xfs_metafile_type metafile_type = xfs_qm_metafile_type(flags); int error; bool need_alloc = true; *ipp = NULL; /* * With superblock that doesn't have separate pquotino, we * share an inode between gquota and pquota. If the on-disk * superblock has GQUOTA and the filesystem is now mounted * with PQUOTA, just use sb_gquotino for sb_pquotino and * vice-versa. */ if (!xfs_has_pquotino(mp) && (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) { xfs_ino_t ino = NULLFSINO; if ((flags & XFS_QMOPT_PQUOTA) && (mp->m_sb.sb_gquotino != NULLFSINO)) { ino = mp->m_sb.sb_gquotino; if (XFS_IS_CORRUPT(mp, mp->m_sb.sb_pquotino != NULLFSINO)) { xfs_fs_mark_sick(mp, XFS_SICK_FS_PQUOTA); return -EFSCORRUPTED; } } else if ((flags & XFS_QMOPT_GQUOTA) && (mp->m_sb.sb_pquotino != NULLFSINO)) { ino = mp->m_sb.sb_pquotino; if (XFS_IS_CORRUPT(mp, mp->m_sb.sb_gquotino != NULLFSINO)) { xfs_fs_mark_sick(mp, XFS_SICK_FS_GQUOTA); return -EFSCORRUPTED; } } if (ino != NULLFSINO) { error = xfs_metafile_iget(mp, ino, metafile_type, ipp); if (error) return error; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; need_alloc = false; } } error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create, need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0, 0, 0, &tp); if (error) return error; if (need_alloc) { struct xfs_icreate_args args = { .mode = S_IFREG, .flags = XFS_ICREATE_UNLINKABLE, }; xfs_ino_t ino; error = xfs_dialloc(&tp, &args, &ino); if (!error) error = xfs_icreate(tp, ino, &args, ipp); if (error) { xfs_trans_cancel(tp); return error; } if (xfs_has_metadir(mp)) xfs_metafile_set_iflag(tp, *ipp, metafile_type); } /* * Make the changes in the superblock, and log those too. * sbfields arg may contain fields other than *QUOTINO; * VERSIONNUM for example. 
*/ spin_lock(&mp->m_sb_lock); if (flags & XFS_QMOPT_SBVERSION) { ASSERT(!xfs_has_quota(mp)); xfs_add_quota(mp); mp->m_sb.sb_uquotino = NULLFSINO; mp->m_sb.sb_gquotino = NULLFSINO; mp->m_sb.sb_pquotino = NULLFSINO; /* qflags will get updated fully _after_ quotacheck */ mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT; } if (flags & XFS_QMOPT_UQUOTA) mp->m_sb.sb_uquotino = (*ipp)->i_ino; else if (flags & XFS_QMOPT_GQUOTA) mp->m_sb.sb_gquotino = (*ipp)->i_ino; else mp->m_sb.sb_pquotino = (*ipp)->i_ino; spin_unlock(&mp->m_sb_lock); xfs_log_sb(tp); error = xfs_trans_commit(tp); if (error) { ASSERT(xfs_is_shutdown(mp)); xfs_alert(mp, "%s failed (error %d)!", __func__, error); } if (need_alloc) { xfs_iunlock(*ipp, XFS_ILOCK_EXCL); xfs_finish_inode_setup(*ipp); } return error; } STATIC void xfs_qm_reset_dqcounts( struct xfs_mount *mp, struct xfs_buf *bp, xfs_dqid_t id, xfs_dqtype_t type) { struct xfs_dqblk *dqb; int j; trace_xfs_reset_dqcounts(bp, _RET_IP_); /* * Reset all counters and timers. They'll be * started afresh by xfs_qm_quotacheck. */ #ifdef DEBUG j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) / sizeof(struct xfs_dqblk); ASSERT(mp->m_quotainfo->qi_dqperchunk == j); #endif dqb = bp->b_addr; for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { struct xfs_disk_dquot *ddq; ddq = (struct xfs_disk_dquot *)&dqb[j]; /* * Do a sanity check, and if needed, repair the dqblk. Don't * output any warnings because it's perfectly possible to * find uninitialised dquot blks. See comment in * xfs_dquot_verify. */ if (xfs_dqblk_verify(mp, &dqb[j], id + j) || (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type) xfs_dqblk_repair(mp, &dqb[j], id + j, type); /* * Reset type in case we are reusing group quota file for * project quotas or vice versa */ ddq->d_type = type; ddq->d_bcount = 0; ddq->d_icount = 0; ddq->d_rtbcount = 0; /* * dquot id 0 stores the default grace period and the maximum * warning limit that were set by the administrator, so we * should not reset them. */ if (ddq->d_id != 0) { ddq->d_btimer = 0; ddq->d_itimer = 0; ddq->d_rtbtimer = 0; ddq->d_bwarns = 0; ddq->d_iwarns = 0; ddq->d_rtbwarns = 0; if (xfs_has_bigtime(mp)) ddq->d_type |= XFS_DQTYPE_BIGTIME; } if (xfs_has_crc(mp)) { xfs_update_cksum((char *)&dqb[j], sizeof(struct xfs_dqblk), XFS_DQUOT_CRC_OFF); } } } STATIC int xfs_qm_reset_dqcounts_all( struct xfs_mount *mp, xfs_dqid_t firstid, xfs_fsblock_t bno, xfs_filblks_t blkcnt, xfs_dqtype_t type, struct list_head *buffer_list) { struct xfs_buf *bp; int error = 0; ASSERT(blkcnt > 0); /* * Blkcnt arg can be a very big number, and might even be * larger than the log itself. So, we have to break it up into * manageable-sized transactions. * Note that we don't start a permanent transaction here; we might * not be able to get a log reservation for the whole thing up front, * and we don't really care to either, because we just discard * everything if we were to crash in the middle of this loop. */ while (blkcnt--) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, bno), mp->m_quotainfo->qi_dqchunklen, 0, &bp, &xfs_dquot_buf_ops); /* * CRC and validation errors will return a EFSCORRUPTED here. If * this occurs, re-read without CRC validation so that we can * repair the damage via xfs_qm_reset_dqcounts(). This process * will leave a trace in the log indicating corruption has * been detected. 
*/ if (error == -EFSCORRUPTED) { error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, bno), mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL); } if (error) break; /* * A corrupt buffer might not have a verifier attached, so * make sure we have the correct one attached before writeback * occurs. */ bp->b_ops = &xfs_dquot_buf_ops; xfs_qm_reset_dqcounts(mp, bp, firstid, type); xfs_buf_delwri_queue(bp, buffer_list); xfs_buf_relse(bp); /* goto the next block. */ bno++; firstid += mp->m_quotainfo->qi_dqperchunk; } return error; } /* * Iterate over all allocated dquot blocks in this quota inode, zeroing all * counters for every chunk of dquots that we find. */ STATIC int xfs_qm_reset_dqcounts_buf( struct xfs_mount *mp, struct xfs_inode *qip, xfs_dqtype_t type, struct list_head *buffer_list) { struct xfs_bmbt_irec *map; int i, nmaps; /* number of map entries */ int error; /* return value */ xfs_fileoff_t lblkno; xfs_filblks_t maxlblkcnt; xfs_dqid_t firstid; xfs_fsblock_t rablkno; xfs_filblks_t rablkcnt; error = 0; /* * This looks racy, but we can't keep an inode lock across a * trans_reserve. But, this gets called during quotacheck, and that * happens only at mount time which is single threaded. */ if (qip->i_nblocks == 0) return 0; map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map), GFP_KERNEL | __GFP_NOFAIL); lblkno = 0; maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); do { uint lock_mode; nmaps = XFS_DQITER_MAP_SIZE; /* * We aren't changing the inode itself. Just changing * some of its data. No new blocks are added here, and * the inode is never added to the transaction. */ lock_mode = xfs_ilock_data_map_shared(qip); error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno, map, &nmaps, 0); xfs_iunlock(qip, lock_mode); if (error) break; ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); for (i = 0; i < nmaps; i++) { ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); ASSERT(map[i].br_blockcount); lblkno += map[i].br_blockcount; if (map[i].br_startblock == HOLESTARTBLOCK) continue; firstid = (xfs_dqid_t) map[i].br_startoff * mp->m_quotainfo->qi_dqperchunk; /* * Do a read-ahead on the next extent. */ if ((i+1 < nmaps) && (map[i+1].br_startblock != HOLESTARTBLOCK)) { rablkcnt = map[i+1].br_blockcount; rablkno = map[i+1].br_startblock; while (rablkcnt--) { xfs_buf_readahead(mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, rablkno), mp->m_quotainfo->qi_dqchunklen, &xfs_dquot_buf_ops); rablkno++; } } /* * Iterate thru all the blks in the extent and * reset the counters of all the dquots inside them. */ error = xfs_qm_reset_dqcounts_all(mp, firstid, map[i].br_startblock, map[i].br_blockcount, type, buffer_list); if (error) goto out; } } while (nmaps > 0); out: kfree(map); return error; } /* * Called by dqusage_adjust in doing a quotacheck. * * Given the inode, and a dquot id this updates both the incore dqout as well * as the buffer copy. This is so that once the quotacheck is done, we can * just log all the buffers, as opposed to logging numerous updates to * individual dquots. */ STATIC int xfs_qm_quotacheck_dqadjust( struct xfs_inode *ip, xfs_dqtype_t type, xfs_qcnt_t nblks, xfs_qcnt_t rtblks) { struct xfs_mount *mp = ip->i_mount; struct xfs_dquot *dqp; xfs_dqid_t id; int error; id = xfs_qm_id_for_quotatype(ip, type); error = xfs_qm_dqget(mp, id, type, true, &dqp); if (error) { /* * Shouldn't be able to turn off quotas here. 
*/ ASSERT(error != -ESRCH); ASSERT(error != -ENOENT); return error; } error = xfs_dquot_attach_buf(NULL, dqp); if (error) return error; trace_xfs_dqadjust(dqp); /* * Adjust the inode count and the block count to reflect this inode's * resource usage. */ dqp->q_ino.count++; dqp->q_ino.reserved++; if (nblks) { dqp->q_blk.count += nblks; dqp->q_blk.reserved += nblks; } if (rtblks) { dqp->q_rtb.count += rtblks; dqp->q_rtb.reserved += rtblks; } /* * Set default limits, adjust timers (since we changed usages) * * There are no timers for the default values set in the root dquot. */ if (dqp->q_id) { xfs_qm_adjust_dqlimits(dqp); xfs_qm_adjust_dqtimers(dqp); } dqp->q_flags |= XFS_DQFLAG_DIRTY; xfs_qm_dqput(dqp); return 0; } /* * callback routine supplied to bulkstat(). Given an inumber, find its * dquots and update them to account for resources taken by that inode. */ /* ARGSUSED */ STATIC int xfs_qm_dqusage_adjust( struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino, void *data) { struct xfs_inode *ip; xfs_filblks_t nblks, rtblks; unsigned int lock_mode; int error; ASSERT(XFS_IS_QUOTA_ON(mp)); /* * rootino must have its resources accounted for, not so with the quota * inodes. */ if (xfs_is_quota_inode(&mp->m_sb, ino)) return 0; /* * We don't _need_ to take the ilock EXCL here because quotacheck runs * at mount time and therefore nobody will be racing chown/chproj. */ error = xfs_iget(mp, tp, ino, XFS_IGET_DONTCACHE, 0, &ip); if (error == -EINVAL || error == -ENOENT) return 0; if (error) return error; /* * Reload the incore unlinked list to avoid failure in inodegc. * Use an unlocked check here because unrecovered unlinked inodes * should be somewhat rare. */ if (xfs_inode_unlinked_incomplete(ip)) { error = xfs_inode_reload_unlinked(ip); if (error) { xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); goto error0; } } /* Metadata directory files are not accounted to user-visible quotas. */ if (xfs_is_metadir_inode(ip)) goto error0; ASSERT(ip->i_delayed_blks == 0); lock_mode = xfs_ilock_data_map_shared(ip); if (XFS_IS_REALTIME_INODE(ip)) { error = xfs_iread_extents(tp, ip, XFS_DATA_FORK); if (error) { xfs_iunlock(ip, lock_mode); goto error0; } } xfs_inode_count_blocks(tp, ip, &nblks, &rtblks); xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED); xfs_iunlock(ip, lock_mode); /* * Add the (disk blocks and inode) resources occupied by this * inode to its dquots. We do this adjustment in the incore dquot, * and also copy the changes to its buffer. * We don't care about putting these changes in a transaction * envelope because if we crash in the middle of a 'quotacheck' * we have to start from the beginning anyway. * Once we're done, we'll log all the dquot bufs. * * The *QUOTA_ON checks below may look pretty racy, but quotachecks * and quotaoffs don't race. (Quotachecks happen at mount time only). 
*/ if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks, rtblks); if (error) goto error0; } if (XFS_IS_GQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks, rtblks); if (error) goto error0; } if (XFS_IS_PQUOTA_ON(mp)) { error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks, rtblks); if (error) goto error0; } error0: xfs_irele(ip); return error; } STATIC int xfs_qm_flush_one( struct xfs_dquot *dqp, void *data) { struct list_head *buffer_list = data; struct xfs_buf *bp = NULL; int error = 0; xfs_dqlock(dqp); if (dqp->q_flags & XFS_DQFLAG_FREEING) goto out_unlock; if (!XFS_DQ_IS_DIRTY(dqp)) goto out_unlock; xfs_qm_dqunpin_wait(dqp); xfs_dqflock(dqp); error = xfs_dquot_use_attached_buf(dqp, &bp); if (error) goto out_unlock; if (!bp) { error = -EFSCORRUPTED; goto out_unlock; } error = xfs_qm_dqflush(dqp, bp); if (!error) xfs_buf_delwri_queue(bp, buffer_list); xfs_buf_relse(bp); out_unlock: xfs_dqunlock(dqp); return error; } /* * Walk thru all the filesystem inodes and construct a consistent view * of the disk quota world. If the quotacheck fails, disable quotas. */ STATIC int xfs_qm_quotacheck( xfs_mount_t *mp) { int error, error2; uint flags; LIST_HEAD (buffer_list); struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip; struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip; struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip; flags = 0; ASSERT(uip || gip || pip); ASSERT(XFS_IS_QUOTA_ON(mp)); xfs_notice(mp, "Quotacheck needed: Please wait."); /* * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset * their counters to zero. We need a clean slate. * We don't log our changes till later. */ if (uip) { error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER, &buffer_list); if (error) goto error_return; flags |= XFS_UQUOTA_CHKD; } if (gip) { error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP, &buffer_list); if (error) goto error_return; flags |= XFS_GQUOTA_CHKD; } if (pip) { error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ, &buffer_list); if (error) goto error_return; flags |= XFS_PQUOTA_CHKD; } xfs_set_quotacheck_running(mp); error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true, NULL); xfs_clear_quotacheck_running(mp); /* * On error, the inode walk may have partially populated the dquot * caches. We must purge them before disabling quota and tearing down * the quotainfo, or else the dquots will leak. */ if (error) goto error_purge; /* * We've made all the changes that we need to make incore. Flush them * down to disk buffers if everything was updated successfully. */ if (XFS_IS_UQUOTA_ON(mp)) { error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one, &buffer_list); } if (XFS_IS_GQUOTA_ON(mp)) { error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; } if (XFS_IS_PQUOTA_ON(mp)) { error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; } error2 = xfs_buf_delwri_submit(&buffer_list); if (!error) error = error2; /* * We can get this error if we couldn't do a dquot allocation inside * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the * dirty dquots that might be cached, we just want to get rid of them * and turn quotaoff. The dquots won't be attached to any of the inodes * at this point (because we intentionally didn't in dqget_noattach). 
*/ if (error) goto error_purge; /* * If one type of quotas is off, then it will lose its * quotachecked status, since we won't be doing accounting for * that type anymore. */ mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD; mp->m_qflags |= flags; error_return: xfs_buf_delwri_cancel(&buffer_list); if (error) { xfs_warn(mp, "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", error); /* * We must turn off quotas. */ ASSERT(mp->m_quotainfo != NULL); xfs_qm_destroy_quotainfo(mp); if (xfs_mount_reset_sbqflags(mp)) { xfs_warn(mp, "Quotacheck: Failed to reset quota flags."); } xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK); } else { xfs_notice(mp, "Quotacheck: Done."); xfs_fs_mark_healthy(mp, XFS_SICK_FS_QUOTACHECK); } return error; error_purge: /* * On error, we may have inodes queued for inactivation. This may try * to attach dquots to the inode before running cleanup operations on * the inode and this can race with the xfs_qm_destroy_quotainfo() call * below that frees mp->m_quotainfo. To avoid this race, flush all the * pending inodegc operations before we purge the dquots from memory, * ensuring that background inactivation is idle whilst we turn off * quotas. */ xfs_inodegc_flush(mp); xfs_qm_dqpurge_all(mp); goto error_return; } /* * This is called from xfs_mountfs to start quotas and initialize all * necessary data structures like quotainfo. This is also responsible for * running a quotacheck as necessary. We are guaranteed that the superblock * is consistently read in at this point. * * If we fail here, the mount will continue with quota turned off. We don't * need to inidicate success or failure at all. */ void xfs_qm_mount_quotas( struct xfs_mount *mp) { int error = 0; uint sbf; /* * If quotas on realtime volumes is not supported, disable quotas * immediately. We only support rtquota if rtgroups are enabled to * avoid problems with older kernels. */ if (mp->m_sb.sb_rextents && (!xfs_has_rtgroups(mp) || xfs_has_zoned(mp))) { xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); mp->m_qflags = 0; goto write_changes; } ASSERT(XFS_IS_QUOTA_ON(mp)); /* * Allocate the quotainfo structure inside the mount struct, and * create quotainode(s), and change/rev superblock if necessary. */ error = xfs_qm_init_quotainfo(mp); if (error) { /* * We must turn off quotas. */ ASSERT(mp->m_quotainfo == NULL); mp->m_qflags = 0; goto write_changes; } /* * If any of the quotas are not consistent, do a quotacheck. */ if (XFS_QM_NEED_QUOTACHECK(mp)) { error = xfs_qm_quotacheck(mp); if (error) { /* Quotacheck failed and disabled quotas. */ return; } } /* * If one type of quotas is off, then it will lose its * quotachecked status, since we won't be doing accounting for * that type anymore. */ if (!XFS_IS_UQUOTA_ON(mp)) mp->m_qflags &= ~XFS_UQUOTA_CHKD; if (!XFS_IS_GQUOTA_ON(mp)) mp->m_qflags &= ~XFS_GQUOTA_CHKD; if (!XFS_IS_PQUOTA_ON(mp)) mp->m_qflags &= ~XFS_PQUOTA_CHKD; write_changes: /* * We actually don't have to acquire the m_sb_lock at all. * This can only be called from mount, and that's single threaded. XXX */ spin_lock(&mp->m_sb_lock); sbf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; spin_unlock(&mp->m_sb_lock); if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { if (xfs_sync_sb(mp, false)) { /* * We could only have been turning quotas off. * We aren't in very good shape actually because * the incore structures are convinced that quotas are * off, but the on disk superblock doesn't know that ! 
*/ ASSERT(!(XFS_IS_QUOTA_ON(mp))); xfs_alert(mp, "%s: Superblock update failed!", __func__); } } if (error) { xfs_warn(mp, "Failed to initialize disk quotas, err %d.", error); return; } } /* * Load the inode for a given type of quota, assuming that the sb fields have * been sorted out. This is not true when switching quota types on a V4 * filesystem, so do not use this function for that. * * Returns -ENOENT if the quota inode field is NULLFSINO; 0 and an inode on * success; or a negative errno. */ int xfs_qm_qino_load( struct xfs_mount *mp, xfs_dqtype_t type, struct xfs_inode **ipp) { struct xfs_trans *tp; struct xfs_inode *dp = NULL; int error; tp = xfs_trans_alloc_empty(mp); if (xfs_has_metadir(mp)) { error = xfs_dqinode_load_parent(tp, &dp); if (error) goto out_cancel; } error = xfs_dqinode_load(tp, dp, type, ipp); if (dp) xfs_irele(dp); out_cancel: xfs_trans_cancel(tp); return error; } /* * This is called after the superblock has been read in and we're ready to * iget the quota inodes. */ STATIC int xfs_qm_init_quotainos( xfs_mount_t *mp) { struct xfs_inode *uip = NULL; struct xfs_inode *gip = NULL; struct xfs_inode *pip = NULL; int error; uint flags = 0; ASSERT(mp->m_quotainfo); /* * Get the uquota and gquota inodes */ if (xfs_has_quota(mp)) { if (XFS_IS_UQUOTA_ON(mp) && mp->m_sb.sb_uquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_uquotino > 0); error = xfs_qm_qino_load(mp, XFS_DQTYPE_USER, &uip); if (error) return error; } if (XFS_IS_GQUOTA_ON(mp) && mp->m_sb.sb_gquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_gquotino > 0); error = xfs_qm_qino_load(mp, XFS_DQTYPE_GROUP, &gip); if (error) goto error_rele; } if (XFS_IS_PQUOTA_ON(mp) && mp->m_sb.sb_pquotino != NULLFSINO) { ASSERT(mp->m_sb.sb_pquotino > 0); error = xfs_qm_qino_load(mp, XFS_DQTYPE_PROJ, &pip); if (error) goto error_rele; } } else { flags |= XFS_QMOPT_SBVERSION; } /* * Create the three inodes, if they don't exist already. The changes * made above will get added to a transaction and logged in one of * the qino_alloc calls below. If the device is readonly, * temporarily switch to read-write to do this. */ if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { error = xfs_qm_qino_alloc(mp, &uip, flags | XFS_QMOPT_UQUOTA); if (error) goto error_rele; flags &= ~XFS_QMOPT_SBVERSION; } if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) { error = xfs_qm_qino_alloc(mp, &gip, flags | XFS_QMOPT_GQUOTA); if (error) goto error_rele; flags &= ~XFS_QMOPT_SBVERSION; } if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) { error = xfs_qm_qino_alloc(mp, &pip, flags | XFS_QMOPT_PQUOTA); if (error) goto error_rele; } mp->m_quotainfo->qi_uquotaip = uip; mp->m_quotainfo->qi_gquotaip = gip; mp->m_quotainfo->qi_pquotaip = pip; return 0; error_rele: if (uip) xfs_irele(uip); if (gip) xfs_irele(gip); if (pip) xfs_irele(pip); return error; } STATIC void xfs_qm_dqfree_one( struct xfs_dquot *dqp) { struct xfs_mount *mp = dqp->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; mutex_lock(&qi->qi_tree_lock); radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id); qi->qi_dquots--; mutex_unlock(&qi->qi_tree_lock); xfs_qm_dqdestroy(dqp); } /* --------------- utility functions for vnodeops ---------------- */ /* * Given an inode, a uid, gid and prid make sure that we have * allocated relevant dquot(s) on disk, and that we won't exceed inode * quotas by creating this file. * This also attaches dquot(s) to the given inode after locking it, * and returns the dquots corresponding to the uid and/or gid. 
* * in : inode (unlocked) * out : udquot, gdquot with references taken and unlocked */ int xfs_qm_vop_dqalloc( struct xfs_inode *ip, kuid_t uid, kgid_t gid, prid_t prid, uint flags, struct xfs_dquot **O_udqpp, struct xfs_dquot **O_gdqpp, struct xfs_dquot **O_pdqpp) { struct xfs_mount *mp = ip->i_mount; struct inode *inode = VFS_I(ip); struct user_namespace *user_ns = inode->i_sb->s_user_ns; struct xfs_dquot *uq = NULL; struct xfs_dquot *gq = NULL; struct xfs_dquot *pq = NULL; int error; uint lockflags; if (!XFS_IS_QUOTA_ON(mp)) return 0; ASSERT(!xfs_is_metadir_inode(ip)); lockflags = XFS_ILOCK_EXCL; xfs_ilock(ip, lockflags); if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) gid = inode->i_gid; /* * Attach the dquot(s) to this inode, doing a dquot allocation * if necessary. The dquot(s) will not be locked. */ if (XFS_NOT_DQATTACHED(mp, ip)) { error = xfs_qm_dqattach_locked(ip, true); if (error) { xfs_iunlock(ip, lockflags); return error; } } if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { ASSERT(O_udqpp); if (!uid_eq(inode->i_uid, uid)) { /* * What we need is the dquot that has this uid, and * if we send the inode to dqget, the uid of the inode * takes priority over what's sent in the uid argument. * We must unlock inode here before calling dqget if * we're not sending the inode, because otherwise * we'll deadlock by doing trans_reserve while * holding ilock. */ xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, from_kuid(user_ns, uid), XFS_DQTYPE_USER, true, &uq); if (error) { ASSERT(error != -ENOENT); return error; } /* * Get the ilock in the right order. */ xfs_dqunlock(uq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { /* * Take an extra reference, because we'll return * this to caller */ ASSERT(ip->i_udquot); uq = xfs_qm_dqhold(ip->i_udquot); } } if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { ASSERT(O_gdqpp); if (!gid_eq(inode->i_gid, gid)) { xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, from_kgid(user_ns, gid), XFS_DQTYPE_GROUP, true, &gq); if (error) { ASSERT(error != -ENOENT); goto error_rele; } xfs_dqunlock(gq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { ASSERT(ip->i_gdquot); gq = xfs_qm_dqhold(ip->i_gdquot); } } if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { ASSERT(O_pdqpp); if (ip->i_projid != prid) { xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, prid, XFS_DQTYPE_PROJ, true, &pq); if (error) { ASSERT(error != -ENOENT); goto error_rele; } xfs_dqunlock(pq); lockflags = XFS_ILOCK_SHARED; xfs_ilock(ip, lockflags); } else { ASSERT(ip->i_pdquot); pq = xfs_qm_dqhold(ip->i_pdquot); } } trace_xfs_dquot_dqalloc(ip); xfs_iunlock(ip, lockflags); if (O_udqpp) *O_udqpp = uq; else xfs_qm_dqrele(uq); if (O_gdqpp) *O_gdqpp = gq; else xfs_qm_dqrele(gq); if (O_pdqpp) *O_pdqpp = pq; else xfs_qm_dqrele(pq); return 0; error_rele: xfs_qm_dqrele(gq); xfs_qm_dqrele(uq); return error; } /* * Actually transfer ownership, and do dquot modifications. * These were already reserved. 
*/ struct xfs_dquot * xfs_qm_vop_chown( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot **IO_olddq, struct xfs_dquot *newdq) { struct xfs_dquot *prevdq; xfs_filblks_t dblocks, rblocks; bool isrt = XFS_IS_REALTIME_INODE(ip); xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); ASSERT(XFS_IS_QUOTA_ON(ip->i_mount)); ASSERT(!xfs_is_metadir_inode(ip)); /* old dquot */ prevdq = *IO_olddq; ASSERT(prevdq); ASSERT(prevdq != newdq); xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks); xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_BCOUNT, -(xfs_qcnt_t)dblocks); xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_RTBCOUNT, -(xfs_qcnt_t)rblocks); xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1); /* the sparkling new dquot */ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_BCOUNT, dblocks); xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_RTBCOUNT, rblocks); xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1); /* * Back when we made quota reservations for the chown, we reserved the * ondisk blocks + delalloc blocks with the new dquot. Now that we've * switched the dquots, decrease the new dquot's block reservation * (having already bumped up the real counter) so that we don't have * any reservation to give back when we commit. */ xfs_trans_mod_dquot(tp, newdq, isrt ? XFS_TRANS_DQ_RES_RTBLKS : XFS_TRANS_DQ_RES_BLKS, -ip->i_delayed_blks); /* * Give the incore reservation for delalloc blocks back to the old * dquot. We don't normally handle delalloc quota reservations * transactionally, so just lock the dquot and subtract from the * reservation. Dirty the transaction because it's too late to turn * back now. */ tp->t_flags |= XFS_TRANS_DIRTY; xfs_dqlock(prevdq); if (isrt) { ASSERT(prevdq->q_rtb.reserved >= ip->i_delayed_blks); prevdq->q_rtb.reserved -= ip->i_delayed_blks; } else { ASSERT(prevdq->q_blk.reserved >= ip->i_delayed_blks); prevdq->q_blk.reserved -= ip->i_delayed_blks; } xfs_dqunlock(prevdq); /* * Take an extra reference, because the inode is going to keep * this dquot pointer even after the trans_commit. */ *IO_olddq = xfs_qm_dqhold(newdq); return prevdq; } int xfs_qm_vop_rename_dqattach( struct xfs_inode **i_tab) { struct xfs_mount *mp = i_tab[0]->i_mount; int i; if (!XFS_IS_QUOTA_ON(mp)) return 0; for (i = 0; (i < 4 && i_tab[i]); i++) { struct xfs_inode *ip = i_tab[i]; int error; /* * Watch out for duplicate entries in the table. */ if (i == 0 || ip != i_tab[i-1]) { if (XFS_NOT_DQATTACHED(mp, ip)) { error = xfs_qm_dqattach(ip); if (error) return error; } } } return 0; } void xfs_qm_vop_create_dqattach( struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp) { struct xfs_mount *mp = tp->t_mountp; if (!XFS_IS_QUOTA_ON(mp)) return; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); ASSERT(!xfs_is_metadir_inode(ip)); if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id); ip->i_udquot = xfs_qm_dqhold(udqp); } if (gdqp && XFS_IS_GQUOTA_ON(mp)) { ASSERT(ip->i_gdquot == NULL); ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id); ip->i_gdquot = xfs_qm_dqhold(gdqp); } if (pdqp && XFS_IS_PQUOTA_ON(mp)) { ASSERT(ip->i_pdquot == NULL); ASSERT(ip->i_projid == pdqp->q_id); ip->i_pdquot = xfs_qm_dqhold(pdqp); } xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1); } /* Decide if this inode's dquot is near an enforcement boundary. 
*/ bool xfs_inode_near_dquot_enforcement( struct xfs_inode *ip, xfs_dqtype_t type) { struct xfs_dquot *dqp; struct xfs_dquot_res *res; struct xfs_dquot_pre *pre; int64_t freesp; /* We only care for quotas that are enabled and enforced. */ dqp = xfs_inode_dquot(ip, type); if (!dqp || !xfs_dquot_is_enforced(dqp)) return false; if (xfs_dquot_res_over_limits(&dqp->q_ino) || xfs_dquot_res_over_limits(&dqp->q_blk) || xfs_dquot_res_over_limits(&dqp->q_rtb)) return true; if (XFS_IS_REALTIME_INODE(ip)) { res = &dqp->q_rtb; pre = &dqp->q_rtb_prealloc; } else { res = &dqp->q_blk; pre = &dqp->q_blk_prealloc; } /* For space on the data device, check the various thresholds. */ if (!pre->q_prealloc_hi_wmark) return false; if (res->reserved < pre->q_prealloc_lo_wmark) return false; if (res->reserved >= pre->q_prealloc_hi_wmark) return true; freesp = pre->q_prealloc_hi_wmark - res->reserved; if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) return true; return false; }
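/*
 * Illustrative sketch only (not part of the kernel sources above): the
 * enforcement-proximity test in xfs_inode_near_dquot_enforcement() boils
 * down to a watermark comparison. The hypothetical helper below restates
 * that decision with plain integers; res_reserved, lo_wmark, hi_wmark and
 * low_space_5pct are made-up parameter names standing in for the dquot
 * reservation and its preallocation watermarks.
 */
static int example_near_watermark(long long res_reserved, long long lo_wmark,
				  long long hi_wmark, long long low_space_5pct)
{
	/* No high watermark configured: never report "near". */
	if (!hi_wmark)
		return 0;
	/* Still below the low watermark: plenty of headroom. */
	if (res_reserved < lo_wmark)
		return 0;
	/* At or above the high watermark: definitely near enforcement. */
	if (res_reserved >= hi_wmark)
		return 1;
	/* Between the watermarks: near only when the remaining headroom is small. */
	return (hi_wmark - res_reserved) < low_space_5pct;
}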
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Hopefully this will be a rather complete VT102 implementation. * * Beeping thanks to John T Kohl. * * Virtual Consoles, Screen Blanking, Screen Dumping, Color, Graphics * Chars, and VT100 enhancements by Peter MacDonald. * * Copy and paste function by Andrew Haylett, * some enhancements by Alessandro Rubini. * * Code to check for different video-cards mostly by Galen Hunt, * <g-hunt@ee.utah.edu> * * Rudimentary ISO 10646/Unicode/UTF-8 character set support by * Markus Kuhn, <mskuhn@immd4.informatik.uni-erlangen.de>.
* * Dynamic allocation of consoles, aeb@cwi.nl, May 1994 * Resizing of consoles, aeb, 940926 * * Code for xterm like mouse click reporting by Peter Orbaek 20-Jul-94 * <poe@daimi.aau.dk> * * User-defined bell sound, new setterm control sequences and printk * redirection by Martin Mares <mj@k332.feld.cvut.cz> 19-Nov-95 * * APM screenblank bug fixed Takashi Manabe <manabe@roy.dsl.tutics.tut.jp> * * Merge with the abstract console driver by Geert Uytterhoeven * <geert@linux-m68k.org>, Jan 1997. * * Original m68k console driver modifications by * * - Arno Griffioen <arno@usn.nl> * - David Carter <carter@cs.bris.ac.uk> * * The abstract console driver provides a generic interface for a text * console. It supports VGA text mode, frame buffer based graphical consoles * and special graphics processors that are only accessible through some * registers (e.g. a TMS340x0 GSP). * * The interface to the hardware is specified using a special structure * (struct consw) which contains function pointers to console operations * (see <linux/console.h> for more information). * * Support for changeable cursor shape * by Pavel Machek <pavel@atrey.karlin.mff.cuni.cz>, August 1997 * * Ported to i386 and con_scrolldelta fixed * by Emmanuel Marty <core@ggi-project.org>, April 1998 * * Resurrected character buffers in videoram plus lots of other trickery * by Martin Mares <mj@atrey.karlin.mff.cuni.cz>, July 1998 * * Removed old-style timers, introduced console_timer, made timer * deletion SMP-safe. 17Jun00, Andrew Morton * * Removed console_lock, enabled interrupts across all console operations * 13 March 2001, Andrew Morton * * Fixed UTF-8 mode so alternate charset modes always work according * to control sequences interpreted in do_con_trol function * preserving backward VT100 semigraphics compatibility, * malformed UTF sequences represented as sequences of replacement glyphs, * original codes or '?' 
as a last resort if replacement glyph is undefined * by Adam Tla/lka <atlka@pg.gda.pl>, Aug 2006 */ #include <linux/module.h> #include <linux/types.h> #include <linux/sched/signal.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/kd.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/major.h> #include <linux/mm.h> #include <linux/console.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/vt_kern.h> #include <linux/selection.h> #include <linux/tiocl.h> #include <linux/kbd_kern.h> #include <linux/consolemap.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/pm.h> #include <linux/font.h> #include <linux/bitops.h> #include <linux/notifier.h> #include <linux/device.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/kdb.h> #include <linux/ctype.h> #include <linux/gcd.h> #define MAX_NR_CON_DRIVER 16 #define CON_DRIVER_FLAG_MODULE 1 #define CON_DRIVER_FLAG_INIT 2 #define CON_DRIVER_FLAG_ATTR 4 #define CON_DRIVER_FLAG_ZOMBIE 8 struct con_driver { const struct consw *con; const char *desc; struct device *dev; int node; int first; int last; int flag; }; static struct con_driver registered_con_driver[MAX_NR_CON_DRIVER]; const struct consw *conswitchp; /* * Here is the default bell parameters: 750HZ, 1/8th of a second */ #define DEFAULT_BELL_PITCH 750 #define DEFAULT_BELL_DURATION (HZ/8) #define DEFAULT_CURSOR_BLINK_MS 200 struct vc vc_cons [MAX_NR_CONSOLES]; EXPORT_SYMBOL(vc_cons); static const struct consw *con_driver_map[MAX_NR_CONSOLES]; static int con_open(struct tty_struct *, struct file *); static void vc_init(struct vc_data *vc, int do_clear); static void gotoxy(struct vc_data *vc, int new_x, int new_y); static void save_cur(struct vc_data *vc); static void reset_terminal(struct vc_data *vc, int do_clear); static void con_flush_chars(struct tty_struct *tty); static int set_vesa_blanking(u8 __user *mode); static void set_cursor(struct vc_data *vc); static void hide_cursor(struct vc_data *vc); static void console_callback(struct work_struct *ignored); static void con_driver_unregister_callback(struct work_struct *ignored); static void blank_screen_t(struct timer_list *unused); static void set_palette(struct vc_data *vc); static void unblank_screen(void); #define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) int default_utf8 = true; module_param(default_utf8, int, S_IRUGO | S_IWUSR); int global_cursor_default = -1; module_param(global_cursor_default, int, S_IRUGO | S_IWUSR); EXPORT_SYMBOL(global_cursor_default); static int cur_default = CUR_UNDERLINE; module_param(cur_default, int, S_IRUGO | S_IWUSR); /* * ignore_poke: don't unblank the screen when things are typed. This is * mainly for the privacy of braille terminal users. 
*/ static int ignore_poke; int do_poke_blanked_console; int console_blanked; EXPORT_SYMBOL(console_blanked); static enum vesa_blank_mode vesa_blank_mode; static int vesa_off_interval; static int blankinterval; core_param(consoleblank, blankinterval, int, 0444); static DECLARE_WORK(console_work, console_callback); static DECLARE_WORK(con_driver_unregister_work, con_driver_unregister_callback); /* * fg_console is the current virtual console, * last_console is the last used one, * want_console is the console we want to switch to, * saved_* variants are for save/restore around kernel debugger enter/leave */ int fg_console; EXPORT_SYMBOL(fg_console); int last_console; int want_console = -1; static int saved_fg_console; static int saved_last_console; static int saved_want_console; static int saved_vc_mode; static int saved_console_blanked; /* * For each existing display, we have a pointer to console currently visible * on that display, allowing consoles other than fg_console to be refreshed * appropriately. Unless the low-level driver supplies its own display_fg * variable, we use this one for the "master display". */ static struct vc_data *master_display_fg; /* * Unfortunately, we need to delay tty echo when we're currently writing to the * console since the code is (and always was) not re-entrant, so we schedule * all flip requests to process context with schedule-task() and run it from * console_callback(). */ /* * For the same reason, we defer scrollback to the console callback. */ static int scrollback_delta; /* * Hook so that the power management routines can (un)blank * the console on our behalf. */ int (*console_blank_hook)(int); EXPORT_SYMBOL(console_blank_hook); static DEFINE_TIMER(console_timer, blank_screen_t); static int blank_state; static int blank_timer_expired; enum { blank_off = 0, blank_normal_wait, blank_vesa_wait, }; /* * /sys/class/tty/tty0/ * * the attribute 'active' contains the name of the current vc * console and it supports poll() to detect vc switches */ static struct device *tty0dev; /* * Notifier list for console events. */ static ATOMIC_NOTIFIER_HEAD(vt_notifier_list); int register_vt_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&vt_notifier_list, nb); } EXPORT_SYMBOL_GPL(register_vt_notifier); int unregister_vt_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&vt_notifier_list, nb); } EXPORT_SYMBOL_GPL(unregister_vt_notifier); static void notify_write(struct vc_data *vc, unsigned int unicode) { struct vt_notifier_param param = { .vc = vc, .c = unicode }; atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param); } static void notify_update(struct vc_data *vc) { struct vt_notifier_param param = { .vc = vc }; atomic_notifier_call_chain(&vt_notifier_list, VT_UPDATE, &param); } /* * Low-Level Functions */ static inline bool con_is_fg(const struct vc_data *vc) { return vc->vc_num == fg_console; } static inline bool con_should_update(const struct vc_data *vc) { return con_is_visible(vc) && !console_blanked; } static inline u16 *screenpos(const struct vc_data *vc, unsigned int offset, bool viewed) { unsigned long origin = viewed ? vc->vc_visible_origin : vc->vc_origin; return (u16 *)(origin + offset); } static void con_putc(struct vc_data *vc, u16 ca, unsigned int y, unsigned int x) { if (vc->vc_sw->con_putc) vc->vc_sw->con_putc(vc, ca, y, x); else vc->vc_sw->con_putcs(vc, &ca, 1, y, x); } /* Called from the keyboard irq path..
*/ static inline void scrolldelta(int lines) { /* FIXME */ /* scrolldelta needs some kind of consistency lock, but the BKL was and still is not protecting versus the scheduled back end */ scrollback_delta += lines; schedule_console_callback(); } void schedule_console_callback(void) { schedule_work(&console_work); } /* * Code to manage unicode-based screen buffers */ /* * Our screen buffer is preceded by an array of line pointers so that * scrolling only implies some pointer shuffling. */ static u32 **vc_uniscr_alloc(unsigned int cols, unsigned int rows) { u32 **uni_lines; void *p; unsigned int memsize, i, col_size = cols * sizeof(**uni_lines); /* allocate everything in one go */ memsize = col_size * rows; memsize += rows * sizeof(*uni_lines); uni_lines = vzalloc(memsize); if (!uni_lines) return NULL; /* initial line pointers */ p = uni_lines + rows; for (i = 0; i < rows; i++) { uni_lines[i] = p; p += col_size; } return uni_lines; } static void vc_uniscr_free(u32 **uni_lines) { vfree(uni_lines); } static void vc_uniscr_set(struct vc_data *vc, u32 **new_uni_lines) { vc_uniscr_free(vc->vc_uni_lines); vc->vc_uni_lines = new_uni_lines; } static void vc_uniscr_putc(struct vc_data *vc, u32 uc) { if (vc->vc_uni_lines) vc->vc_uni_lines[vc->state.y][vc->state.x] = uc; } static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr) { if (vc->vc_uni_lines) { u32 *ln = vc->vc_uni_lines[vc->state.y]; unsigned int x = vc->state.x, cols = vc->vc_cols; memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln)); memset32(&ln[x], ' ', nr); } } static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr) { if (vc->vc_uni_lines) { u32 *ln = vc->vc_uni_lines[vc->state.y]; unsigned int x = vc->state.x, cols = vc->vc_cols; memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln)); memset32(&ln[cols - nr], ' ', nr); } } static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x, unsigned int nr) { if (vc->vc_uni_lines) memset32(&vc->vc_uni_lines[vc->state.y][x], ' ', nr); } static void vc_uniscr_clear_lines(struct vc_data *vc, unsigned int y, unsigned int nr) { if (vc->vc_uni_lines) while (nr--) memset32(vc->vc_uni_lines[y++], ' ', vc->vc_cols); } /* juggling array rotation algorithm (complexity O(N), size complexity O(1)) */ static void juggle_array(u32 **array, unsigned int size, unsigned int nr) { unsigned int gcd_idx; for (gcd_idx = 0; gcd_idx < gcd(nr, size); gcd_idx++) { u32 *gcd_idx_val = array[gcd_idx]; unsigned int dst_idx = gcd_idx; while (1) { unsigned int src_idx = (dst_idx + nr) % size; if (src_idx == gcd_idx) break; array[dst_idx] = array[src_idx]; dst_idx = src_idx; } array[dst_idx] = gcd_idx_val; } } static void vc_uniscr_scroll(struct vc_data *vc, unsigned int top, unsigned int bottom, enum con_scroll dir, unsigned int nr) { u32 **uni_lines = vc->vc_uni_lines; unsigned int size = bottom - top; if (!uni_lines) return; if (dir == SM_DOWN) { juggle_array(&uni_lines[top], size, size - nr); vc_uniscr_clear_lines(vc, top, nr); } else { juggle_array(&uni_lines[top], size, nr); vc_uniscr_clear_lines(vc, bottom - nr, nr); } } static u32 vc_uniscr_getc(struct vc_data *vc, int relative_pos) { int pos = vc->state.x + vc->vc_need_wrap + relative_pos; if (vc->vc_uni_lines && in_range(pos, 0, vc->vc_cols)) return vc->vc_uni_lines[vc->state.y][pos]; return 0; } static void vc_uniscr_copy_area(u32 **dst_lines, unsigned int dst_cols, unsigned int dst_rows, u32 **src_lines, unsigned int src_cols, unsigned int src_top_row, unsigned int src_bot_row) { unsigned int dst_row = 0; if (!dst_lines) 
return; while (src_top_row < src_bot_row) { u32 *src_line = src_lines[src_top_row]; u32 *dst_line = dst_lines[dst_row]; memcpy(dst_line, src_line, src_cols * sizeof(*src_line)); if (dst_cols - src_cols) memset32(dst_line + src_cols, ' ', dst_cols - src_cols); src_top_row++; dst_row++; } while (dst_row < dst_rows) { u32 *dst_line = dst_lines[dst_row]; memset32(dst_line, ' ', dst_cols); dst_row++; } } /* * Called from vcs_read() to make sure unicode screen retrieval is possible. * This will initialize the unicode screen buffer if not already done. * This returns 0 if OK, or a negative error code otherwise. * In particular, -ENODATA is returned if the console is not in UTF-8 mode. */ int vc_uniscr_check(struct vc_data *vc) { u32 **uni_lines; unsigned short *p; int x, y, mask; WARN_CONSOLE_UNLOCKED(); if (!vc->vc_utf) return -ENODATA; if (vc->vc_uni_lines) return 0; uni_lines = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows); if (!uni_lines) return -ENOMEM; /* * Let's populate it initially with (imperfect) reverse translation. * This is the next best thing we can do short of having it enabled * from the start even when no users rely on this functionality. True * unicode content will be available after a complete screen refresh. */ p = (unsigned short *)vc->vc_origin; mask = vc->vc_hi_font_mask | 0xff; for (y = 0; y < vc->vc_rows; y++) { u32 *line = uni_lines[y]; for (x = 0; x < vc->vc_cols; x++) { u16 glyph = scr_readw(p++) & mask; line[x] = inverse_translate(vc, glyph, true); } } vc->vc_uni_lines = uni_lines; return 0; } /* * Called from vcs_read() to get the unicode data from the screen. * This must be preceded by a successful call to vc_uniscr_check() once * the console lock has been taken. */ void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed, unsigned int row, unsigned int col, unsigned int nr) { u32 **uni_lines = vc->vc_uni_lines; int offset = row * vc->vc_size_row + col * 2; unsigned long pos; if (WARN_ON_ONCE(!uni_lines)) return; pos = (unsigned long)screenpos(vc, offset, viewed); if (pos >= vc->vc_origin && pos < vc->vc_scr_end) { /* * Desired position falls in the main screen buffer. * However the actual row/col might be different if * scrollback is active. */ row = (pos - vc->vc_origin) / vc->vc_size_row; col = ((pos - vc->vc_origin) % vc->vc_size_row) / 2; memcpy(dest, &uni_lines[row][col], nr * sizeof(u32)); } else { /* * Scrollback is active. For now let's simply backtranslate * the screen glyphs until the unicode screen buffer does * synchronize with console display drivers for a scrollback * buffer of its own. 
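* (This is the same imperfect inverse_translate() backtranslation used to seed the buffer in vc_uniscr_check() above, here applied to the glyphs currently shown from the scrollback area.)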
*/ u16 *p = (u16 *)pos; int mask = vc->vc_hi_font_mask | 0xff; u32 *uni_buf = dest; while (nr--) { u16 glyph = scr_readw(p++) & mask; *uni_buf++ = inverse_translate(vc, glyph, true); } } } static void con_scroll(struct vc_data *vc, unsigned int top, unsigned int bottom, enum con_scroll dir, unsigned int nr) { unsigned int rows = bottom - top; u16 *clear, *dst, *src; if (top + nr >= bottom) nr = rows - 1; if (bottom > vc->vc_rows || top >= bottom || nr < 1) return; vc_uniscr_scroll(vc, top, bottom, dir, nr); if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, top, bottom, dir, nr)) return; src = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * top); dst = (u16 *)(vc->vc_origin + vc->vc_size_row * (top + nr)); if (dir == SM_UP) { clear = src + (rows - nr) * vc->vc_cols; swap(src, dst); } scr_memmovew(dst, src, (rows - nr) * vc->vc_size_row); scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr); } static void do_update_region(struct vc_data *vc, unsigned long start, int count) { unsigned int xx, yy, offset; u16 *p = (u16 *)start; offset = (start - vc->vc_origin) / 2; xx = offset % vc->vc_cols; yy = offset / vc->vc_cols; for(;;) { u16 attrib = scr_readw(p) & 0xff00; int startx = xx; u16 *q = p; while (xx < vc->vc_cols && count) { if (attrib != (scr_readw(p) & 0xff00)) { if (p > q) vc->vc_sw->con_putcs(vc, q, p-q, yy, startx); startx = xx; q = p; attrib = scr_readw(p) & 0xff00; } p++; xx++; count--; } if (p > q) vc->vc_sw->con_putcs(vc, q, p-q, yy, startx); if (!count) break; xx = 0; yy++; } } void update_region(struct vc_data *vc, unsigned long start, int count) { WARN_CONSOLE_UNLOCKED(); if (con_should_update(vc)) { hide_cursor(vc); do_update_region(vc, start, count); set_cursor(vc); } } EXPORT_SYMBOL(update_region); /* Structure of attributes is hardware-dependent */ static u8 build_attr(struct vc_data *vc, u8 _color, enum vc_intensity _intensity, bool _blink, bool _underline, bool _reverse, bool _italic) { if (vc->vc_sw->con_build_attr) return vc->vc_sw->con_build_attr(vc, _color, _intensity, _blink, _underline, _reverse, _italic); /* * ++roman: I completely changed the attribute format for monochrome * mode (!can_do_color). The formerly used MDA (monochrome display * adapter) format didn't allow the combination of certain effects. 
* Now the attribute is just a bit vector: * Bit 0..1: intensity (0..2) * Bit 2 : underline * Bit 3 : reverse * Bit 7 : blink */ { u8 a = _color; if (!vc->vc_can_do_color) return _intensity | (_italic << 1) | (_underline << 2) | (_reverse << 3) | (_blink << 7); if (_italic) a = (a & 0xF0) | vc->vc_itcolor; else if (_underline) a = (a & 0xf0) | vc->vc_ulcolor; else if (_intensity == VCI_HALF_BRIGHT) a = (a & 0xf0) | vc->vc_halfcolor; if (_reverse) a = (a & 0x88) | (((a >> 4) | (a << 4)) & 0x77); if (_blink) a ^= 0x80; if (_intensity == VCI_BOLD) a ^= 0x08; if (vc->vc_hi_font_mask == 0x100) a <<= 1; return a; } } static void update_attr(struct vc_data *vc) { vc->vc_attr = build_attr(vc, vc->state.color, vc->state.intensity, vc->state.blink, vc->state.underline, vc->state.reverse ^ vc->vc_decscnm, vc->state.italic); vc->vc_video_erase_char = ' ' | (build_attr(vc, vc->state.color, VCI_NORMAL, vc->state.blink, false, vc->vc_decscnm, false) << 8); } /* Note: inverting the screen twice should revert to the original state */ void invert_screen(struct vc_data *vc, int offset, int count, bool viewed) { u16 *p; WARN_CONSOLE_UNLOCKED(); count /= 2; p = screenpos(vc, offset, viewed); if (vc->vc_sw->con_invert_region) { vc->vc_sw->con_invert_region(vc, p, count); } else { u16 *q = p; int cnt = count; u16 a; if (!vc->vc_can_do_color) { while (cnt--) { a = scr_readw(q); a ^= 0x0800; scr_writew(a, q); q++; } } else if (vc->vc_hi_font_mask == 0x100) { while (cnt--) { a = scr_readw(q); a = (a & 0x11ff) | ((a & 0xe000) >> 4) | ((a & 0x0e00) << 4); scr_writew(a, q); q++; } } else { while (cnt--) { a = scr_readw(q); a = (a & 0x88ff) | ((a & 0x7000) >> 4) | ((a & 0x0700) << 4); scr_writew(a, q); q++; } } } if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, count); notify_update(vc); } /* used by selection: complement pointer position */ void complement_pos(struct vc_data *vc, int offset) { static int old_offset = -1; static unsigned short old; static unsigned short oldx, oldy; WARN_CONSOLE_UNLOCKED(); if (old_offset != -1 && old_offset >= 0 && old_offset < vc->vc_screenbuf_size) { scr_writew(old, screenpos(vc, old_offset, true)); if (con_should_update(vc)) con_putc(vc, old, oldy, oldx); notify_update(vc); } old_offset = offset; if (offset != -1 && offset >= 0 && offset < vc->vc_screenbuf_size) { unsigned short new; u16 *p = screenpos(vc, offset, true); old = scr_readw(p); new = old ^ vc->vc_complement_mask; scr_writew(new, p); if (con_should_update(vc)) { oldx = (offset >> 1) % vc->vc_cols; oldy = (offset >> 1) / vc->vc_cols; con_putc(vc, new, oldy, oldx); } notify_update(vc); } } static void insert_char(struct vc_data *vc, unsigned int nr) { unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_insert(vc, nr); scr_memmovew(p + nr, p, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, vc->vc_cols - vc->state.x); } static void delete_char(struct vc_data *vc, unsigned int nr) { unsigned short *p = (unsigned short *) vc->vc_pos; vc_uniscr_delete(vc, nr); scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); scr_memsetw(p + vc->vc_cols - vc->state.x - nr, vc->vc_video_erase_char, nr * 2); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long) p, vc->vc_cols - vc->state.x); } static int softcursor_original = -1; static void add_softcursor(struct vc_data *vc) { int i = scr_readw((u16 *) vc->vc_pos); u32 type = vc->vc_cursor_type; if 
(!(type & CUR_SW)) return; if (softcursor_original != -1) return; softcursor_original = i; i |= CUR_SET(type); i ^= CUR_CHANGE(type); if ((type & CUR_ALWAYS_BG) && (softcursor_original & CUR_BG) == (i & CUR_BG)) i ^= CUR_BG; if ((type & CUR_INVERT_FG_BG) && (i & CUR_FG) == ((i & CUR_BG) >> 4)) i ^= CUR_FG; scr_writew(i, (u16 *)vc->vc_pos); if (con_should_update(vc)) con_putc(vc, i, vc->state.y, vc->state.x); } static void hide_softcursor(struct vc_data *vc) { if (softcursor_original != -1) { scr_writew(softcursor_original, (u16 *)vc->vc_pos); if (con_should_update(vc)) con_putc(vc, softcursor_original, vc->state.y, vc->state.x); softcursor_original = -1; } } static void hide_cursor(struct vc_data *vc) { if (vc_is_sel(vc)) clear_selection(); vc->vc_sw->con_cursor(vc, false); hide_softcursor(vc); } static void set_cursor(struct vc_data *vc) { if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS) return; if (vc->vc_deccm) { if (vc_is_sel(vc)) clear_selection(); add_softcursor(vc); if (CUR_SIZE(vc->vc_cursor_type) != CUR_NONE) vc->vc_sw->con_cursor(vc, true); } else hide_cursor(vc); } static void set_origin(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); if (!con_is_visible(vc) || !vc->vc_sw->con_set_origin || !vc->vc_sw->con_set_origin(vc)) vc->vc_origin = (unsigned long)vc->vc_screenbuf; vc->vc_visible_origin = vc->vc_origin; vc->vc_scr_end = vc->vc_origin + vc->vc_screenbuf_size; vc->vc_pos = vc->vc_origin + vc->vc_size_row * vc->state.y + 2 * vc->state.x; } static void save_screen(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); if (vc->vc_sw->con_save_screen) vc->vc_sw->con_save_screen(vc); } static void flush_scrollback(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); set_origin(vc); if (!con_is_visible(vc)) return; /* * The legacy way for flushing the scrollback buffer is to use a side * effect of the con_switch method. We do it only on the foreground * console as background consoles have no scrollback buffers in that * case and we obviously don't want to switch to them. */ hide_cursor(vc); vc->vc_sw->con_switch(vc); set_cursor(vc); } /* * Redrawing of screen */ void clear_buffer_attributes(struct vc_data *vc) { unsigned short *p = (unsigned short *)vc->vc_origin; int count = vc->vc_screenbuf_size / 2; int mask = vc->vc_hi_font_mask | 0xff; for (; count > 0; count--, p++) { scr_writew((scr_readw(p)&mask) | (vc->vc_video_erase_char & ~mask), p); } } void redraw_screen(struct vc_data *vc, int is_switch) { int redraw = 0; WARN_CONSOLE_UNLOCKED(); if (!vc) { /* strange ... */ /* printk("redraw_screen: tty %d not allocated ??\n", new_console+1); */ return; } if (is_switch) { struct vc_data *old_vc = vc_cons[fg_console].d; if (old_vc == vc) return; if (!con_is_visible(vc)) redraw = 1; *vc->vc_display_fg = vc; fg_console = vc->vc_num; hide_cursor(old_vc); if (!con_is_visible(old_vc)) { save_screen(old_vc); set_origin(old_vc); } if (tty0dev) sysfs_notify(&tty0dev->kobj, NULL, "active"); } else { hide_cursor(vc); redraw = 1; } if (redraw) { bool update; int old_was_color = vc->vc_can_do_color; set_origin(vc); update = vc->vc_sw->con_switch(vc); set_palette(vc); /* * If console changed from mono<->color, the best we can do * is to clear the buffer attributes. As it currently stands, * rebuilding new attributes from the old buffer is not doable * without overly complex code. 
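* (The attribute byte layout differs between the colour and the monochrome case, see build_attr() above.)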
*/ if (old_was_color != vc->vc_can_do_color) { update_attr(vc); clear_buffer_attributes(vc); } if (update && vc->vc_mode != KD_GRAPHICS) do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2); } set_cursor(vc); if (is_switch) { vt_set_leds_compute_shiftstate(); notify_update(vc); } } EXPORT_SYMBOL(redraw_screen); /* * Allocation, freeing and resizing of VTs. */ int vc_cons_allocated(unsigned int i) { return (i < MAX_NR_CONSOLES && vc_cons[i].d); } static void visual_init(struct vc_data *vc, int num, bool init) { /* ++Geert: vc->vc_sw->con_init determines console size */ if (vc->vc_sw) module_put(vc->vc_sw->owner); vc->vc_sw = conswitchp; if (con_driver_map[num]) vc->vc_sw = con_driver_map[num]; __module_get(vc->vc_sw->owner); vc->vc_num = num; vc->vc_display_fg = &master_display_fg; if (vc->uni_pagedict_loc) con_free_unimap(vc); vc->uni_pagedict_loc = &vc->uni_pagedict; vc->uni_pagedict = NULL; vc->vc_hi_font_mask = 0; vc->vc_complement_mask = 0; vc->vc_can_do_color = 0; vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; vc->vc_sw->con_init(vc, init); if (!vc->vc_complement_mask) vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800; vc->vc_s_complement_mask = vc->vc_complement_mask; vc->vc_size_row = vc->vc_cols << 1; vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row; } static void visual_deinit(struct vc_data *vc) { vc->vc_sw->con_deinit(vc); module_put(vc->vc_sw->owner); } static void vc_port_destruct(struct tty_port *port) { struct vc_data *vc = container_of(port, struct vc_data, port); kfree(vc); } static const struct tty_port_operations vc_port_ops = { .destruct = vc_port_destruct, }; /* * Change # of rows and columns (0 means unchanged/the size of fg_console) * [this is to be used together with some user program * like resize that changes the hardware videomode] */ #define VC_MAXCOL (32767) #define VC_MAXROW (32767) int vc_allocate(unsigned int currcons) /* return 0 on success */ { struct vt_notifier_param param; struct vc_data *vc; int err; WARN_CONSOLE_UNLOCKED(); if (currcons >= MAX_NR_CONSOLES) return -ENXIO; if (vc_cons[currcons].d) return 0; /* due to the granularity of kmalloc, we waste some memory here */ /* the alloc is done in two steps, to optimize the common situation of a 25x80 console (structsize=216, screenbuf_size=4000) */ /* although the numbers above are not valid since long ago, the point is still up-to-date and the comment still has its value even if only as a historical artifact. 
--mj, July 1998 */ param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL); if (!vc) return -ENOMEM; vc_cons[currcons].d = vc; tty_port_init(&vc->port); vc->port.ops = &vc_port_ops; INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); visual_init(vc, currcons, true); if (!*vc->uni_pagedict_loc) con_set_default_unimap(vc); err = -EINVAL; if (vc->vc_cols > VC_MAXCOL || vc->vc_rows > VC_MAXROW || vc->vc_screenbuf_size > KMALLOC_MAX_SIZE || !vc->vc_screenbuf_size) goto err_free; err = -ENOMEM; vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) goto err_free; /* If no drivers have overridden us and the user didn't pass a boot option, default to displaying the cursor */ if (global_cursor_default == -1) global_cursor_default = 1; vc_init(vc, 1); vcs_make_sysfs(currcons); atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param); return 0; err_free: visual_deinit(vc); kfree(vc); vc_cons[currcons].d = NULL; return err; } static inline int resize_screen(struct vc_data *vc, int width, int height, bool from_user) { /* Resizes the resolution of the display adapter */ int err = 0; if (vc->vc_sw->con_resize) err = vc->vc_sw->con_resize(vc, width, height, from_user); return err; } /** * vc_do_resize - resizing method for the tty * @tty: tty being resized * @vc: virtual console private data * @cols: columns * @lines: lines * @from_user: invoked by a user? * * Resize a virtual console, clipping according to the actual constraints. If * the caller passes a tty structure then update the termios winsize * information and perform any necessary signal handling. * * Locking: Caller must hold the console semaphore. Takes the termios rwsem and * ctrl.lock of the tty IFF a tty is passed. */ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, unsigned int cols, unsigned int lines, bool from_user) { unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; unsigned long end; unsigned int old_rows, old_row_size, first_copied_row; unsigned int new_cols, new_rows, new_row_size, new_screen_size; unsigned short *oldscreen, *newscreen; u32 **new_uniscr = NULL; WARN_CONSOLE_UNLOCKED(); if (cols > VC_MAXCOL || lines > VC_MAXROW) return -EINVAL; new_cols = (cols ? cols : vc->vc_cols); new_rows = (lines ? lines : vc->vc_rows); new_row_size = new_cols << 1; new_screen_size = new_row_size * new_rows; if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) { /* * This function is being called here to cover the case * where the userspace calls the FBIOPUT_VSCREENINFO twice, * passing the same fb_var_screeninfo containing the fields * yres/xres equal to a number non-multiple of vc_font.height * and yres_virtual/xres_virtual equal to number lesser than the * vc_font.height and yres/xres. * In the second call, the struct fb_var_screeninfo isn't * being modified by the underlying driver because of the * if above, and this causes the fbcon_display->vrows to become * negative and it eventually leads to out-of-bound * access by the imageblit function. * To give the correct values to the struct and to not have * to deal with possible errors from the code below, we call * the resize_screen here as well.
*/ return resize_screen(vc, new_cols, new_rows, from_user); } if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size) return -EINVAL; newscreen = kzalloc(new_screen_size, GFP_USER); if (!newscreen) return -ENOMEM; if (vc->vc_uni_lines) { new_uniscr = vc_uniscr_alloc(new_cols, new_rows); if (!new_uniscr) { kfree(newscreen); return -ENOMEM; } } if (vc_is_sel(vc)) clear_selection(); old_rows = vc->vc_rows; old_row_size = vc->vc_size_row; err = resize_screen(vc, new_cols, new_rows, from_user); if (err) { kfree(newscreen); vc_uniscr_free(new_uniscr); return err; } vc->vc_rows = new_rows; vc->vc_cols = new_cols; vc->vc_size_row = new_row_size; vc->vc_screenbuf_size = new_screen_size; rlth = min(old_row_size, new_row_size); rrem = new_row_size - rlth; old_origin = vc->vc_origin; new_origin = (long) newscreen; new_scr_end = new_origin + new_screen_size; if (vc->state.y > new_rows) { if (old_rows - vc->state.y < new_rows) { /* * Cursor near the bottom, copy contents from the * bottom of buffer */ first_copied_row = (old_rows - new_rows); } else { /* * Cursor is in no man's land, copy 1/2 screenful * from the top and bottom of cursor position */ first_copied_row = (vc->state.y - new_rows/2); } old_origin += first_copied_row * old_row_size; } else first_copied_row = 0; end = old_origin + old_row_size * min(old_rows, new_rows); vc_uniscr_copy_area(new_uniscr, new_cols, new_rows, vc->vc_uni_lines, rlth/2, first_copied_row, min(old_rows, new_rows)); vc_uniscr_set(vc, new_uniscr); update_attr(vc); while (old_origin < end) { scr_memcpyw((unsigned short *) new_origin, (unsigned short *) old_origin, rlth); if (rrem) scr_memsetw((void *)(new_origin + rlth), vc->vc_video_erase_char, rrem); old_origin += old_row_size; new_origin += new_row_size; } if (new_scr_end > new_origin) scr_memsetw((void *)new_origin, vc->vc_video_erase_char, new_scr_end - new_origin); oldscreen = vc->vc_screenbuf; vc->vc_screenbuf = newscreen; vc->vc_screenbuf_size = new_screen_size; set_origin(vc); kfree(oldscreen); /* do part of a reset_terminal() */ vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; gotoxy(vc, vc->state.x, vc->state.y); save_cur(vc); if (tty) { /* Rewrite the requested winsize data with the actual resulting sizes */ struct winsize ws; memset(&ws, 0, sizeof(ws)); ws.ws_row = vc->vc_rows; ws.ws_col = vc->vc_cols; ws.ws_ypixel = vc->vc_scan_lines; tty_do_resize(tty, &ws); } if (con_is_visible(vc)) update_screen(vc); vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); notify_update(vc); return err; } /** * __vc_resize - resize a VT * @vc: virtual console * @cols: columns * @rows: rows * @from_user: invoked by a user? * * Resize a virtual console as seen from the console end of things. We use the * common vc_do_resize() method to update the structures. * * Locking: The caller must hold the console sem to protect console internals * and @vc->port.tty. */ int __vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows, bool from_user) { return vc_do_resize(vc->port.tty, vc, cols, rows, from_user); } EXPORT_SYMBOL(__vc_resize); /** * vt_resize - resize a VT * @tty: tty to resize * @ws: winsize attributes * * Resize a virtual terminal. This is called by the tty layer as we register * our own handler for resizing. The mutual helper does all the actual work. * * Locking: Takes the console sem and the called methods then take the tty * termios_rwsem and the tty ctrl.lock in that order. 
*/ static int vt_resize(struct tty_struct *tty, struct winsize *ws) { struct vc_data *vc = tty->driver_data; int ret; console_lock(); ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row, false); console_unlock(); return ret; } struct vc_data *vc_deallocate(unsigned int currcons) { struct vc_data *vc = NULL; WARN_CONSOLE_UNLOCKED(); if (vc_cons_allocated(currcons)) { struct vt_notifier_param param; param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param); vcs_remove_sysfs(currcons); visual_deinit(vc); con_free_unimap(vc); put_pid(vc->vt_pid); vc_uniscr_set(vc, NULL); kfree(vc->vc_screenbuf); vc_cons[currcons].d = NULL; } return vc; } /* * VT102 emulator */ enum { EPecma = 0, EPdec, EPeq, EPgt, EPlt}; #define set_kbd(vc, x) vt_set_kbd_mode_bit((vc)->vc_num, (x)) #define clr_kbd(vc, x) vt_clr_kbd_mode_bit((vc)->vc_num, (x)) #define is_kbd(vc, x) vt_get_kbd_mode_bit((vc)->vc_num, (x)) #define decarm VC_REPEAT #define decckm VC_CKMODE #define kbdapplic VC_APPLIC #define lnm VC_CRLF const unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7, 8,12,10,14, 9,13,11,15 }; EXPORT_SYMBOL(color_table); /* the default colour table, for VGA+ colour systems */ unsigned char default_red[] = { 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x55, 0xff, 0x55, 0xff, 0x55, 0xff, 0x55, 0xff }; module_param_array(default_red, byte, NULL, S_IRUGO | S_IWUSR); EXPORT_SYMBOL(default_red); unsigned char default_grn[] = { 0x00, 0x00, 0xaa, 0x55, 0x00, 0x00, 0xaa, 0xaa, 0x55, 0x55, 0xff, 0xff, 0x55, 0x55, 0xff, 0xff }; module_param_array(default_grn, byte, NULL, S_IRUGO | S_IWUSR); EXPORT_SYMBOL(default_grn); unsigned char default_blu[] = { 0x00, 0x00, 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa, 0x55, 0x55, 0x55, 0x55, 0xff, 0xff, 0xff, 0xff }; module_param_array(default_blu, byte, NULL, S_IRUGO | S_IWUSR); EXPORT_SYMBOL(default_blu); /* * gotoxy() must verify all boundaries, because the arguments * might also be negative. If the given position is out of * bounds, the cursor is placed at the nearest margin. */ static void gotoxy(struct vc_data *vc, int new_x, int new_y) { int min_y, max_y; if (new_x < 0) vc->state.x = 0; else { if (new_x >= vc->vc_cols) vc->state.x = vc->vc_cols - 1; else vc->state.x = new_x; } if (vc->vc_decom) { min_y = vc->vc_top; max_y = vc->vc_bottom; } else { min_y = 0; max_y = vc->vc_rows; } if (new_y < min_y) vc->state.y = min_y; else if (new_y >= max_y) vc->state.y = max_y - 1; else vc->state.y = new_y; vc->vc_pos = vc->vc_origin + vc->state.y * vc->vc_size_row + (vc->state.x << 1); vc->vc_need_wrap = 0; } /* for absolute user moves, when decom is set */ static void gotoxay(struct vc_data *vc, int new_x, int new_y) { gotoxy(vc, new_x, vc->vc_decom ?
(vc->vc_top + new_y) : new_y); } void scrollback(struct vc_data *vc) { scrolldelta(-(vc->vc_rows / 2)); } void scrollfront(struct vc_data *vc, int lines) { if (!lines) lines = vc->vc_rows / 2; scrolldelta(lines); } static void lf(struct vc_data *vc) { /* don't scroll if above bottom of scrolling region, or * if below scrolling region */ if (vc->state.y + 1 == vc->vc_bottom) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_UP, 1); else if (vc->state.y < vc->vc_rows - 1) { vc->state.y++; vc->vc_pos += vc->vc_size_row; } vc->vc_need_wrap = 0; notify_write(vc, '\n'); } static void ri(struct vc_data *vc) { /* don't scroll if below top of scrolling region, or * if above scrolling region */ if (vc->state.y == vc->vc_top) con_scroll(vc, vc->vc_top, vc->vc_bottom, SM_DOWN, 1); else if (vc->state.y > 0) { vc->state.y--; vc->vc_pos -= vc->vc_size_row; } vc->vc_need_wrap = 0; } static inline void cr(struct vc_data *vc) { vc->vc_pos -= vc->state.x << 1; vc->vc_need_wrap = vc->state.x = 0; notify_write(vc, '\r'); } static inline void bs(struct vc_data *vc) { if (vc->state.x) { vc->vc_pos -= 2; vc->state.x--; vc->vc_need_wrap = 0; notify_write(vc, '\b'); } } static inline void del(struct vc_data *vc) { /* ignored */ } enum CSI_J { CSI_J_CURSOR_TO_END = 0, CSI_J_START_TO_CURSOR = 1, CSI_J_VISIBLE = 2, CSI_J_FULL = 3, }; static void csi_J(struct vc_data *vc, enum CSI_J vpar) { unsigned short *start; unsigned int count; switch (vpar) { case CSI_J_CURSOR_TO_END: vc_uniscr_clear_line(vc, vc->state.x, vc->vc_cols - vc->state.x); vc_uniscr_clear_lines(vc, vc->state.y + 1, vc->vc_rows - vc->state.y - 1); count = (vc->vc_scr_end - vc->vc_pos) >> 1; start = (unsigned short *)vc->vc_pos; break; case CSI_J_START_TO_CURSOR: vc_uniscr_clear_line(vc, 0, vc->state.x + 1); vc_uniscr_clear_lines(vc, 0, vc->state.y); count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1; start = (unsigned short *)vc->vc_origin; break; case CSI_J_FULL: flush_scrollback(vc); fallthrough; case CSI_J_VISIBLE: vc_uniscr_clear_lines(vc, 0, vc->vc_rows); count = vc->vc_cols * vc->vc_rows; start = (unsigned short *)vc->vc_origin; break; default: return; } scr_memsetw(start, vc->vc_video_erase_char, 2 * count); if (con_should_update(vc)) do_update_region(vc, (unsigned long) start, count); vc->vc_need_wrap = 0; } enum { CSI_K_CURSOR_TO_LINEEND = 0, CSI_K_LINESTART_TO_CURSOR = 1, CSI_K_LINE = 2, }; static void csi_K(struct vc_data *vc) { unsigned int count; unsigned short *start = (unsigned short *)vc->vc_pos; int offset; switch (vc->vc_par[0]) { case CSI_K_CURSOR_TO_LINEEND: offset = 0; count = vc->vc_cols - vc->state.x; break; case CSI_K_LINESTART_TO_CURSOR: offset = -vc->state.x; count = vc->state.x + 1; break; case CSI_K_LINE: offset = -vc->state.x; count = vc->vc_cols; break; default: return; } vc_uniscr_clear_line(vc, vc->state.x + offset, count); scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count); vc->vc_need_wrap = 0; if (con_should_update(vc)) do_update_region(vc, (unsigned long)(start + offset), count); } /* erase the following count positions */ static void csi_X(struct vc_data *vc) { /* not vt100? 
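ECH (CSI Ps X) comes from ECMA-48 and later DEC terminals rather than the original VT100 repertoire, hence the question.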
*/ unsigned int count = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x); vc_uniscr_clear_line(vc, vc->state.x, count); scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count); if (con_should_update(vc)) vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, count); vc->vc_need_wrap = 0; } static void default_attr(struct vc_data *vc) { vc->state.intensity = VCI_NORMAL; vc->state.italic = false; vc->state.underline = false; vc->state.reverse = false; vc->state.blink = false; vc->state.color = vc->vc_def_color; } struct rgb { u8 r; u8 g; u8 b; }; static void rgb_from_256(unsigned int i, struct rgb *c) { if (i < 8) { /* Standard colours. */ c->r = i&1 ? 0xaa : 0x00; c->g = i&2 ? 0xaa : 0x00; c->b = i&4 ? 0xaa : 0x00; } else if (i < 16) { c->r = i&1 ? 0xff : 0x55; c->g = i&2 ? 0xff : 0x55; c->b = i&4 ? 0xff : 0x55; } else if (i < 232) { /* 6x6x6 colour cube. */ i -= 16; c->b = i % 6 * 255 / 6; i /= 6; c->g = i % 6 * 255 / 6; i /= 6; c->r = i * 255 / 6; } else /* Grayscale ramp. */ c->r = c->g = c->b = i * 10 - 2312; } static void rgb_foreground(struct vc_data *vc, const struct rgb *c) { u8 hue = 0, max = max3(c->r, c->g, c->b); if (c->r > max / 2) hue |= 4; if (c->g > max / 2) hue |= 2; if (c->b > max / 2) hue |= 1; if (hue == 7 && max <= 0x55) { hue = 0; vc->state.intensity = VCI_BOLD; } else if (max > 0xaa) vc->state.intensity = VCI_BOLD; else vc->state.intensity = VCI_NORMAL; vc->state.color = (vc->state.color & 0xf0) | hue; } static void rgb_background(struct vc_data *vc, const struct rgb *c) { /* For backgrounds, err on the dark side. */ vc->state.color = (vc->state.color & 0x0f) | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3; } /* * ITU T.416 Higher colour modes. They break the usual properties of SGR codes * and thus need to be detected and ignored by hand. That standard also * wants : rather than ; as separators but sequences containing : are currently * completely ignored by the parser. * * Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in * supporting them. 
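* For example, "ESC [ 38 ; 5 ; 196 m" selects foreground colour 196 from the * 256-colour palette (see rgb_from_256() above), while "ESC [ 48 ; 2 ; 255 ; 128 ; 0 m" * sets a direct 24-bit RGB background.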
*/ static int vc_t416_color(struct vc_data *vc, int i, void(*set_color)(struct vc_data *vc, const struct rgb *c)) { struct rgb c; i++; if (i > vc->vc_npar) return i; if (vc->vc_par[i] == 5 && i + 1 <= vc->vc_npar) { /* 256 colours */ i++; rgb_from_256(vc->vc_par[i], &c); } else if (vc->vc_par[i] == 2 && i + 3 <= vc->vc_npar) { /* 24 bit */ c.r = vc->vc_par[i + 1]; c.g = vc->vc_par[i + 2]; c.b = vc->vc_par[i + 3]; i += 3; } else return i; set_color(vc, &c); return i; } enum { CSI_m_DEFAULT = 0, CSI_m_BOLD = 1, CSI_m_HALF_BRIGHT = 2, CSI_m_ITALIC = 3, CSI_m_UNDERLINE = 4, CSI_m_BLINK = 5, CSI_m_REVERSE = 7, CSI_m_PRI_FONT = 10, CSI_m_ALT_FONT1 = 11, CSI_m_ALT_FONT2 = 12, CSI_m_DOUBLE_UNDERLINE = 21, CSI_m_NORMAL_INTENSITY = 22, CSI_m_NO_ITALIC = 23, CSI_m_NO_UNDERLINE = 24, CSI_m_NO_BLINK = 25, CSI_m_NO_REVERSE = 27, CSI_m_FG_COLOR_BEG = 30, CSI_m_FG_COLOR_END = 37, CSI_m_FG_COLOR = 38, CSI_m_DEFAULT_FG_COLOR = 39, CSI_m_BG_COLOR_BEG = 40, CSI_m_BG_COLOR_END = 47, CSI_m_BG_COLOR = 48, CSI_m_DEFAULT_BG_COLOR = 49, CSI_m_BRIGHT_FG_COLOR_BEG = 90, CSI_m_BRIGHT_FG_COLOR_END = 97, CSI_m_BRIGHT_FG_COLOR_OFF = CSI_m_BRIGHT_FG_COLOR_BEG - CSI_m_FG_COLOR_BEG, CSI_m_BRIGHT_BG_COLOR_BEG = 100, CSI_m_BRIGHT_BG_COLOR_END = 107, CSI_m_BRIGHT_BG_COLOR_OFF = CSI_m_BRIGHT_BG_COLOR_BEG - CSI_m_BG_COLOR_BEG, }; /* console_lock is held */ static void csi_m(struct vc_data *vc) { int i; for (i = 0; i <= vc->vc_npar; i++) switch (vc->vc_par[i]) { case CSI_m_DEFAULT: /* all attributes off */ default_attr(vc); break; case CSI_m_BOLD: vc->state.intensity = VCI_BOLD; break; case CSI_m_HALF_BRIGHT: vc->state.intensity = VCI_HALF_BRIGHT; break; case CSI_m_ITALIC: vc->state.italic = true; break; case CSI_m_DOUBLE_UNDERLINE: /* * No console drivers support double underline, so * convert it to a single underline. */ case CSI_m_UNDERLINE: vc->state.underline = true; break; case CSI_m_BLINK: vc->state.blink = true; break; case CSI_m_REVERSE: vc->state.reverse = true; break; case CSI_m_PRI_FONT: /* ANSI X3.64-1979 (SCO-ish?) * Select primary font, don't display control chars if * defined, don't set bit 8 on output. */ vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], vc); vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; break; case CSI_m_ALT_FONT1: /* ANSI X3.64-1979 (SCO-ish?) * Select first alternate font, lets chars < 32 be * displayed as ROM chars. */ vc->vc_translate = set_translate(IBMPC_MAP, vc); vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 0; break; case CSI_m_ALT_FONT2: /* ANSI X3.64-1979 (SCO-ish?) * Select second alternate font, toggle high bit * before displaying as ROM char. */ vc->vc_translate = set_translate(IBMPC_MAP, vc); vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 1; break; case CSI_m_NORMAL_INTENSITY: vc->state.intensity = VCI_NORMAL; break; case CSI_m_NO_ITALIC: vc->state.italic = false; break; case CSI_m_NO_UNDERLINE: vc->state.underline = false; break; case CSI_m_NO_BLINK: vc->state.blink = false; break; case CSI_m_NO_REVERSE: vc->state.reverse = false; break; case CSI_m_FG_COLOR: i = vc_t416_color(vc, i, rgb_foreground); break; case CSI_m_BG_COLOR: i = vc_t416_color(vc, i, rgb_background); break; case CSI_m_DEFAULT_FG_COLOR: vc->state.color = (vc->vc_def_color & 0x0f) | (vc->state.color & 0xf0); break; case CSI_m_DEFAULT_BG_COLOR: vc->state.color = (vc->vc_def_color & 0xf0) | (vc->state.color & 0x0f); break; case CSI_m_BRIGHT_FG_COLOR_BEG ... CSI_m_BRIGHT_FG_COLOR_END: vc->state.intensity = VCI_BOLD; vc->vc_par[i] -= CSI_m_BRIGHT_FG_COLOR_OFF; fallthrough; case CSI_m_FG_COLOR_BEG ... 
CSI_m_FG_COLOR_END: vc->vc_par[i] -= CSI_m_FG_COLOR_BEG; vc->state.color = color_table[vc->vc_par[i]] | (vc->state.color & 0xf0); break; case CSI_m_BRIGHT_BG_COLOR_BEG ... CSI_m_BRIGHT_BG_COLOR_END: vc->vc_par[i] -= CSI_m_BRIGHT_BG_COLOR_OFF; fallthrough; case CSI_m_BG_COLOR_BEG ... CSI_m_BG_COLOR_END: vc->vc_par[i] -= CSI_m_BG_COLOR_BEG; vc->state.color = (color_table[vc->vc_par[i]] << 4) | (vc->state.color & 0x0f); break; } update_attr(vc); } static void respond_string(const char *p, size_t len, struct tty_port *port) { tty_insert_flip_string(port, p, len); tty_flip_buffer_push(port); } static void cursor_report(struct vc_data *vc, struct tty_struct *tty) { char buf[40]; int len; len = sprintf(buf, "\033[%d;%dR", vc->state.y + (vc->vc_decom ? vc->vc_top + 1 : 1), vc->state.x + 1); respond_string(buf, len, tty->port); } static inline void status_report(struct tty_struct *tty) { static const char teminal_ok[] = "\033[0n"; respond_string(teminal_ok, strlen(teminal_ok), tty->port); } static inline void respond_ID(struct tty_struct *tty) { /* terminal answer to an ESC-Z or csi0c query. */ static const char vt102_id[] = "\033[?6c"; respond_string(vt102_id, strlen(vt102_id), tty->port); } void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry) { char buf[8]; int len; len = sprintf(buf, "\033[M%c%c%c", (char)(' ' + butt), (char)('!' + mrx), (char)('!' + mry)); respond_string(buf, len, tty->port); } /* invoked via ioctl(TIOCLINUX) and through set_selection_user */ int mouse_reporting(void) { return vc_cons[fg_console].d->vc_report_mouse; } /* invoked via ioctl(TIOCLINUX) */ static int get_bracketed_paste(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; return vc->vc_bracketed_paste; } enum { CSI_DEC_hl_CURSOR_KEYS = 1, /* CKM: cursor keys send ^[Ox/^[[x */ CSI_DEC_hl_132_COLUMNS = 3, /* COLM: 80/132 mode switch */ CSI_DEC_hl_REVERSE_VIDEO = 5, /* SCNM */ CSI_DEC_hl_ORIGIN_MODE = 6, /* OM: origin relative/absolute */ CSI_DEC_hl_AUTOWRAP = 7, /* AWM */ CSI_DEC_hl_AUTOREPEAT = 8, /* ARM */ CSI_DEC_hl_MOUSE_X10 = 9, CSI_DEC_hl_SHOW_CURSOR = 25, /* TCEM */ CSI_DEC_hl_MOUSE_VT200 = 1000, CSI_DEC_hl_BRACKETED_PASTE = 2004, }; /* console_lock is held */ static void csi_DEC_hl(struct vc_data *vc, bool on_off) { unsigned int i; for (i = 0; i <= vc->vc_npar; i++) switch (vc->vc_par[i]) { case CSI_DEC_hl_CURSOR_KEYS: if (on_off) set_kbd(vc, decckm); else clr_kbd(vc, decckm); break; case CSI_DEC_hl_132_COLUMNS: /* unimplemented */ #if 0 vc_resize(deccolm ? 132 : 80, vc->vc_rows); /* this alone does not suffice; some user mode utility has to change the hardware regs */ #endif break; case CSI_DEC_hl_REVERSE_VIDEO: if (vc->vc_decscnm != on_off) { vc->vc_decscnm = on_off; invert_screen(vc, 0, vc->vc_screenbuf_size, false); update_attr(vc); } break; case CSI_DEC_hl_ORIGIN_MODE: vc->vc_decom = on_off; gotoxay(vc, 0, 0); break; case CSI_DEC_hl_AUTOWRAP: vc->vc_decawm = on_off; break; case CSI_DEC_hl_AUTOREPEAT: if (on_off) set_kbd(vc, decarm); else clr_kbd(vc, decarm); break; case CSI_DEC_hl_MOUSE_X10: vc->vc_report_mouse = on_off ? 1 : 0; break; case CSI_DEC_hl_SHOW_CURSOR: vc->vc_deccm = on_off; break; case CSI_DEC_hl_MOUSE_VT200: vc->vc_report_mouse = on_off ? 
2 : 0; break; case CSI_DEC_hl_BRACKETED_PASTE: vc->vc_bracketed_paste = on_off; break; } } enum { CSI_hl_DISPLAY_CTRL = 3, /* handle ansi control chars */ CSI_hl_INSERT = 4, /* IRM: insert/replace */ CSI_hl_AUTO_NL = 20, /* LNM: Enter == CrLf/Lf */ }; /* console_lock is held */ static void csi_hl(struct vc_data *vc, bool on_off) { unsigned int i; for (i = 0; i <= vc->vc_npar; i++) switch (vc->vc_par[i]) { /* ANSI modes set/reset */ case CSI_hl_DISPLAY_CTRL: vc->vc_disp_ctrl = on_off; break; case CSI_hl_INSERT: vc->vc_decim = on_off; break; case CSI_hl_AUTO_NL: if (on_off) set_kbd(vc, lnm); else clr_kbd(vc, lnm); break; } } enum CSI_right_square_bracket { CSI_RSB_COLOR_FOR_UNDERLINE = 1, CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2, CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8, CSI_RSB_BLANKING_INTERVAL = 9, CSI_RSB_BELL_FREQUENCY = 10, CSI_RSB_BELL_DURATION = 11, CSI_RSB_BRING_CONSOLE_TO_FRONT = 12, CSI_RSB_UNBLANK = 13, CSI_RSB_VESA_OFF_INTERVAL = 14, CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15, CSI_RSB_CURSOR_BLINK_INTERVAL = 16, }; /* * csi_RSB - csi+] (Right Square Bracket) handler * * These are linux console private sequences. * * console_lock is held */ static void csi_RSB(struct vc_data *vc) { switch (vc->vc_par[0]) { case CSI_RSB_COLOR_FOR_UNDERLINE: if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_ulcolor = color_table[vc->vc_par[1]]; if (vc->state.underline) update_attr(vc); } break; case CSI_RSB_COLOR_FOR_HALF_BRIGHT: if (vc->vc_can_do_color && vc->vc_par[1] < 16) { vc->vc_halfcolor = color_table[vc->vc_par[1]]; if (vc->state.intensity == VCI_HALF_BRIGHT) update_attr(vc); } break; case CSI_RSB_MAKE_CUR_COLOR_DEFAULT: vc->vc_def_color = vc->vc_attr; if (vc->vc_hi_font_mask == 0x100) vc->vc_def_color >>= 1; default_attr(vc); update_attr(vc); break; case CSI_RSB_BLANKING_INTERVAL: blankinterval = min(vc->vc_par[1], 60U) * 60; poke_blanked_console(); break; case CSI_RSB_BELL_FREQUENCY: if (vc->vc_npar >= 1) vc->vc_bell_pitch = vc->vc_par[1]; else vc->vc_bell_pitch = DEFAULT_BELL_PITCH; break; case CSI_RSB_BELL_DURATION: if (vc->vc_npar >= 1) vc->vc_bell_duration = (vc->vc_par[1] < 2000) ? 
msecs_to_jiffies(vc->vc_par[1]) : 0; else vc->vc_bell_duration = DEFAULT_BELL_DURATION; break; case CSI_RSB_BRING_CONSOLE_TO_FRONT: if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1)) set_console(vc->vc_par[1] - 1); break; case CSI_RSB_UNBLANK: poke_blanked_console(); break; case CSI_RSB_VESA_OFF_INTERVAL: vesa_off_interval = min(vc->vc_par[1], 60U) * 60 * HZ; break; case CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT: set_console(last_console); break; case CSI_RSB_CURSOR_BLINK_INTERVAL: if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 && vc->vc_par[1] <= USHRT_MAX) vc->vc_cur_blink_ms = vc->vc_par[1]; else vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; break; } } /* console_lock is held */ static void csi_at(struct vc_data *vc, unsigned int nr) { nr = clamp(nr, 1, vc->vc_cols - vc->state.x); insert_char(vc, nr); } /* console_lock is held */ static void csi_L(struct vc_data *vc) { unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y); con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr); vc->vc_need_wrap = 0; } /* console_lock is held */ static void csi_P(struct vc_data *vc) { unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x); delete_char(vc, nr); } /* console_lock is held */ static void csi_M(struct vc_data *vc) { unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y); con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr); vc->vc_need_wrap = 0; } /* console_lock is held (except via vc_init->reset_terminal */ static void save_cur(struct vc_data *vc) { memcpy(&vc->saved_state, &vc->state, sizeof(vc->state)); } /* console_lock is held */ static void restore_cur(struct vc_data *vc) { memcpy(&vc->state, &vc->saved_state, sizeof(vc->state)); gotoxy(vc, vc->state.x, vc->state.y); vc->vc_translate = set_translate(vc->state.Gx_charset[vc->state.charset], vc); update_attr(vc); vc->vc_need_wrap = 0; } /** * enum vc_ctl_state - control characters state of a vt * * @ESnormal: initial state, no control characters parsed * @ESesc: ESC parsed * @ESsquare: CSI parsed -- modifiers/parameters/ctrl chars expected * @ESgetpars: CSI parsed -- parameters/ctrl chars expected * @ESfunckey: CSI [ parsed * @EShash: ESC # parsed * @ESsetG0: ESC ( parsed * @ESsetG1: ESC ) parsed * @ESpercent: ESC % parsed * @EScsiignore: CSI [0x20-0x3f] parsed * @ESnonstd: OSC parsed * @ESpalette: OSC P parsed * @ESosc: OSC [0-9] parsed * @ESANSI_first: first state for ignoring ansi control sequences * @ESapc: ESC _ parsed * @ESpm: ESC ^ parsed * @ESdcs: ESC P parsed * @ESANSI_last: last state for ignoring ansi control sequences */ enum vc_ctl_state { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey, EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd, ESpalette, ESosc, ESANSI_first = ESosc, ESapc, ESpm, ESdcs, ESANSI_last = ESdcs, }; /* console_lock is held (except via vc_init()) */ static void reset_terminal(struct vc_data *vc, int do_clear) { unsigned int i; vc->vc_top = 0; vc->vc_bottom = vc->vc_rows; vc->vc_state = ESnormal; vc->vc_priv = EPecma; vc->vc_translate = set_translate(LAT1_MAP, vc); vc->state.Gx_charset[0] = LAT1_MAP; vc->state.Gx_charset[1] = GRAF_MAP; vc->state.charset = 0; vc->vc_need_wrap = 0; vc->vc_report_mouse = 0; vc->vc_bracketed_paste = 0; vc->vc_utf = default_utf8; vc->vc_utf_count = 0; vc->vc_disp_ctrl = 0; vc->vc_toggle_meta = 0; vc->vc_decscnm = 0; vc->vc_decom = 0; vc->vc_decawm = 1; vc->vc_deccm = global_cursor_default; vc->vc_decim = 0; vt_reset_keyboard(vc->vc_num); vc->vc_cursor_type = cur_default; vc->vc_complement_mask = 
vc->vc_s_complement_mask; default_attr(vc); update_attr(vc); bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); for (i = 0; i < VC_TABSTOPS_COUNT; i += 8) set_bit(i, vc->vc_tab_stop); vc->vc_bell_pitch = DEFAULT_BELL_PITCH; vc->vc_bell_duration = DEFAULT_BELL_DURATION; vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS; gotoxy(vc, 0, 0); save_cur(vc); if (do_clear) csi_J(vc, CSI_J_VISIBLE); } static void vc_setGx(struct vc_data *vc, unsigned int which, u8 c) { unsigned char *charset = &vc->state.Gx_charset[which]; switch (c) { case '0': *charset = GRAF_MAP; break; case 'B': *charset = LAT1_MAP; break; case 'U': *charset = IBMPC_MAP; break; case 'K': *charset = USER_MAP; break; } if (vc->state.charset == which) vc->vc_translate = set_translate(*charset, vc); } static bool ansi_control_string(enum vc_ctl_state state) { return state >= ESANSI_first && state <= ESANSI_last; } enum { ASCII_NULL = 0, ASCII_BELL = 7, ASCII_BACKSPACE = 8, ASCII_IGNORE_FIRST = ASCII_BACKSPACE, ASCII_HTAB = 9, ASCII_LINEFEED = 10, ASCII_VTAB = 11, ASCII_FORMFEED = 12, ASCII_CAR_RET = 13, ASCII_IGNORE_LAST = ASCII_CAR_RET, ASCII_SHIFTOUT = 14, ASCII_SHIFTIN = 15, ASCII_CANCEL = 24, ASCII_SUBSTITUTE = 26, ASCII_ESCAPE = 27, ASCII_CSI_IGNORE_FIRST = ' ', /* 0x2x, 0x3a and 0x3c - 0x3f */ ASCII_CSI_IGNORE_LAST = '?', ASCII_DEL = 127, ASCII_EXT_CSI = 128 + ASCII_ESCAPE, }; /* * Handle ascii characters in control sequences and change states accordingly. * E.g. ESC sets the state of vc to ESesc. * * Returns: true if @c handled. */ static bool handle_ascii(struct tty_struct *tty, struct vc_data *vc, u8 c) { switch (c) { case ASCII_NULL: return true; case ASCII_BELL: if (ansi_control_string(vc->vc_state)) vc->vc_state = ESnormal; else if (vc->vc_bell_duration) kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration); return true; case ASCII_BACKSPACE: bs(vc); return true; case ASCII_HTAB: vc->vc_pos -= (vc->state.x << 1); vc->state.x = find_next_bit(vc->vc_tab_stop, min(vc->vc_cols - 1, VC_TABSTOPS_COUNT), vc->state.x + 1); if (vc->state.x >= VC_TABSTOPS_COUNT) vc->state.x = vc->vc_cols - 1; vc->vc_pos += (vc->state.x << 1); notify_write(vc, '\t'); return true; case ASCII_LINEFEED: case ASCII_VTAB: case ASCII_FORMFEED: lf(vc); if (!is_kbd(vc, lnm)) return true; fallthrough; case ASCII_CAR_RET: cr(vc); return true; case ASCII_SHIFTOUT: vc->state.charset = 1; vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc); vc->vc_disp_ctrl = 1; return true; case ASCII_SHIFTIN: vc->state.charset = 0; vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc); vc->vc_disp_ctrl = 0; return true; case ASCII_CANCEL: case ASCII_SUBSTITUTE: vc->vc_state = ESnormal; return true; case ASCII_ESCAPE: vc->vc_state = ESesc; return true; case ASCII_DEL: del(vc); return true; case ASCII_EXT_CSI: vc->vc_state = ESsquare; return true; } return false; } /* * Handle a character (@c) following an ESC (when @vc is in the ESesc state). * E.g. previous ESC with @c == '[' here yields the ESsquare state (that is: * CSI). 
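* Simple two-byte sequences are handled directly here as well: e.g. ESC 7 and * ESC 8 save and restore the cursor state, and ESC c resets the terminal.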
*/ static void handle_esc(struct tty_struct *tty, struct vc_data *vc, u8 c) { vc->vc_state = ESnormal; switch (c) { case '[': vc->vc_state = ESsquare; break; case ']': vc->vc_state = ESnonstd; break; case '_': vc->vc_state = ESapc; break; case '^': vc->vc_state = ESpm; break; case '%': vc->vc_state = ESpercent; break; case 'E': cr(vc); lf(vc); break; case 'M': ri(vc); break; case 'D': lf(vc); break; case 'H': if (vc->state.x < VC_TABSTOPS_COUNT) set_bit(vc->state.x, vc->vc_tab_stop); break; case 'P': vc->vc_state = ESdcs; break; case 'Z': respond_ID(tty); break; case '7': save_cur(vc); break; case '8': restore_cur(vc); break; case '(': vc->vc_state = ESsetG0; break; case ')': vc->vc_state = ESsetG1; break; case '#': vc->vc_state = EShash; break; case 'c': reset_terminal(vc, 1); break; case '>': /* Numeric keypad */ clr_kbd(vc, kbdapplic); break; case '=': /* Appl. keypad */ set_kbd(vc, kbdapplic); break; } } /* * Handle special DEC control sequences ("ESC [ ? parameters char"). Parameters * are in @vc->vc_par and the char is in @c here. */ static void csi_DEC(struct tty_struct *tty, struct vc_data *vc, u8 c) { switch (c) { case 'h': csi_DEC_hl(vc, true); break; case 'l': csi_DEC_hl(vc, false); break; case 'c': if (vc->vc_par[0]) vc->vc_cursor_type = CUR_MAKE(vc->vc_par[0], vc->vc_par[1], vc->vc_par[2]); else vc->vc_cursor_type = cur_default; break; case 'm': clear_selection(); if (vc->vc_par[0]) vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1]; else vc->vc_complement_mask = vc->vc_s_complement_mask; break; case 'n': if (vc->vc_par[0] == 5) status_report(tty); else if (vc->vc_par[0] == 6) cursor_report(vc, tty); break; } } /* * Handle Control Sequence Introducer control characters. That is * "ESC [ parameters char". Parameters are in @vc->vc_par and the char is in * @c here. 
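* For example, "ESC [ 2 J" arrives with vc_par[0] == 2 and @c == 'J' and clears * the visible screen via csi_J().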
*/ static void csi_ECMA(struct tty_struct *tty, struct vc_data *vc, u8 c) { switch (c) { case 'G': case '`': if (vc->vc_par[0]) vc->vc_par[0]--; gotoxy(vc, vc->vc_par[0], vc->state.y); break; case 'A': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]); break; case 'B': case 'e': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]); break; case 'C': case 'a': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y); break; case 'D': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y); break; case 'E': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, 0, vc->state.y + vc->vc_par[0]); break; case 'F': if (!vc->vc_par[0]) vc->vc_par[0]++; gotoxy(vc, 0, vc->state.y - vc->vc_par[0]); break; case 'd': if (vc->vc_par[0]) vc->vc_par[0]--; gotoxay(vc, vc->state.x ,vc->vc_par[0]); break; case 'H': case 'f': if (vc->vc_par[0]) vc->vc_par[0]--; if (vc->vc_par[1]) vc->vc_par[1]--; gotoxay(vc, vc->vc_par[1], vc->vc_par[0]); break; case 'J': csi_J(vc, vc->vc_par[0]); break; case 'K': csi_K(vc); break; case 'L': csi_L(vc); break; case 'M': csi_M(vc); break; case 'P': csi_P(vc); break; case 'c': if (!vc->vc_par[0]) respond_ID(tty); break; case 'g': if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT) set_bit(vc->state.x, vc->vc_tab_stop); else if (vc->vc_par[0] == 3) bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT); break; case 'h': csi_hl(vc, true); break; case 'l': csi_hl(vc, false); break; case 'm': csi_m(vc); break; case 'n': if (vc->vc_par[0] == 5) status_report(tty); else if (vc->vc_par[0] == 6) cursor_report(vc, tty); break; case 'q': /* DECLL - but only 3 leds */ /* map 0,1,2,3 to 0,1,2,4 */ if (vc->vc_par[0] < 4) vt_set_led_state(vc->vc_num, (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4); break; case 'r': if (!vc->vc_par[0]) vc->vc_par[0]++; if (!vc->vc_par[1]) vc->vc_par[1] = vc->vc_rows; /* Minimum allowed region is 2 lines */ if (vc->vc_par[0] < vc->vc_par[1] && vc->vc_par[1] <= vc->vc_rows) { vc->vc_top = vc->vc_par[0] - 1; vc->vc_bottom = vc->vc_par[1]; gotoxay(vc, 0, 0); } break; case 's': save_cur(vc); break; case 'u': restore_cur(vc); break; case 'X': csi_X(vc); break; case '@': csi_at(vc, vc->vc_par[0]); break; case ']': csi_RSB(vc); break; } } static void vc_reset_params(struct vc_data *vc) { memset(vc->vc_par, 0, sizeof(vc->vc_par)); vc->vc_npar = 0; } /* console_lock is held */ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, u8 c) { /* * Control characters can be used in the _middle_ * of an escape sequence, aside from ANSI control strings. */ if (ansi_control_string(vc->vc_state) && c >= ASCII_IGNORE_FIRST && c <= ASCII_IGNORE_LAST) return; if (handle_ascii(tty, vc, c)) return; switch(vc->vc_state) { case ESesc: /* ESC */ handle_esc(tty, vc, c); return; case ESnonstd: /* ESC ] aka OSC */ switch (c) { case 'P': /* palette escape sequence */ vc_reset_params(vc); vc->vc_state = ESpalette; return; case 'R': /* reset palette */ reset_palette(vc); break; case '0' ... 
'9': vc->vc_state = ESosc; return; } vc->vc_state = ESnormal; return; case ESpalette: /* ESC ] P aka OSC P */ if (isxdigit(c)) { vc->vc_par[vc->vc_npar++] = hex_to_bin(c); if (vc->vc_npar == 7) { int i = vc->vc_par[0] * 3, j = 1; vc->vc_palette[i] = 16 * vc->vc_par[j++]; vc->vc_palette[i++] += vc->vc_par[j++]; vc->vc_palette[i] = 16 * vc->vc_par[j++]; vc->vc_palette[i++] += vc->vc_par[j++]; vc->vc_palette[i] = 16 * vc->vc_par[j++]; vc->vc_palette[i] += vc->vc_par[j]; set_palette(vc); vc->vc_state = ESnormal; } } else vc->vc_state = ESnormal; return; case ESsquare: /* ESC [ aka CSI, parameters or modifiers expected */ vc_reset_params(vc); vc->vc_state = ESgetpars; switch (c) { case '[': /* Function key */ vc->vc_state = ESfunckey; return; case '?': vc->vc_priv = EPdec; return; case '>': vc->vc_priv = EPgt; return; case '=': vc->vc_priv = EPeq; return; case '<': vc->vc_priv = EPlt; return; } vc->vc_priv = EPecma; fallthrough; case ESgetpars: /* ESC [ aka CSI, parameters expected */ switch (c) { case ';': if (vc->vc_npar < NPAR - 1) { vc->vc_npar++; return; } break; case '0' ... '9': vc->vc_par[vc->vc_npar] *= 10; vc->vc_par[vc->vc_npar] += c - '0'; return; } if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST) { vc->vc_state = EScsiignore; return; } /* parameters done, handle the control char @c */ vc->vc_state = ESnormal; switch (vc->vc_priv) { case EPdec: csi_DEC(tty, vc, c); return; case EPecma: csi_ECMA(tty, vc, c); return; default: return; } case EScsiignore: if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST) return; vc->vc_state = ESnormal; return; case ESpercent: /* ESC % */ vc->vc_state = ESnormal; switch (c) { case '@': /* defined in ISO 2022 */ vc->vc_utf = 0; return; case 'G': /* prelim official escape code */ case '8': /* retained for compatibility */ vc->vc_utf = 1; return; } return; case ESfunckey: /* ESC [ [ aka CSI [ */ vc->vc_state = ESnormal; return; case EShash: /* ESC # */ vc->vc_state = ESnormal; if (c == '8') { /* DEC screen alignment test. 
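(DECALN, "ESC # 8"): the erase character is temporarily set to 'E' and the visible screen is cleared with it so that display geometry can be checked. Admittedly a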
kludge :-) */ vc->vc_video_erase_char = (vc->vc_video_erase_char & 0xff00) | 'E'; csi_J(vc, CSI_J_VISIBLE); vc->vc_video_erase_char = (vc->vc_video_erase_char & 0xff00) | ' '; do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2); } return; case ESsetG0: /* ESC ( */ vc_setGx(vc, 0, c); vc->vc_state = ESnormal; return; case ESsetG1: /* ESC ) */ vc_setGx(vc, 1, c); vc->vc_state = ESnormal; return; case ESapc: /* ESC _ */ return; case ESosc: /* ESC ] [0-9] aka OSC [0-9] */ return; case ESpm: /* ESC ^ */ return; case ESdcs: /* ESC P */ return; default: vc->vc_state = ESnormal; } } struct vc_draw_region { unsigned long from, to; int x; }; static void con_flush(struct vc_data *vc, struct vc_draw_region *draw) { if (draw->x < 0) return; vc->vc_sw->con_putcs(vc, (u16 *)draw->from, (u16 *)draw->to - (u16 *)draw->from, vc->state.y, draw->x); draw->x = -1; } static inline int vc_translate_ascii(const struct vc_data *vc, int c) { if (IS_ENABLED(CONFIG_CONSOLE_TRANSLATIONS)) { if (vc->vc_toggle_meta) c |= 0x80; return vc->vc_translate[c]; } return c; } /** * vc_sanitize_unicode - Replace invalid Unicode code points with ``U+FFFD`` * @c: the received code point */ static inline int vc_sanitize_unicode(const int c) { if (c >= 0xd800 && c <= 0xdfff) return 0xfffd; return c; } /** * vc_translate_unicode - Combine UTF-8 into Unicode in &vc_data.vc_utf_char * @vc: virtual console * @c: UTF-8 byte to translate * @rescan: set to true iff @c wasn't consumed here and needs to be re-processed * * * &vc_data.vc_utf_char is the being-constructed Unicode code point. * * &vc_data.vc_utf_count is the number of continuation bytes still expected to * arrive. * * &vc_data.vc_npar is the number of continuation bytes arrived so far. * * Return: * * %-1 - Input OK so far, @c consumed, further bytes expected. * * %0xFFFD - Possibility 1: input invalid, @c may have been consumed (see * desc. of @rescan). Possibility 2: input OK, @c consumed, * ``U+FFFD`` is the resulting code point. ``U+FFFD`` is valid, * ``REPLACEMENT CHARACTER``. * * otherwise - Input OK, @c consumed, resulting code point returned. */ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan) { static const u32 utf8_length_changes[] = {0x7f, 0x7ff, 0xffff, 0x10ffff}; /* Continuation byte received */ if ((c & 0xc0) == 0x80) { /* Unexpected continuation byte? 
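Continuation bytes (0x80..0xbf) are only valid while a multi-byte sequence is in progress; a stray one is rejected and ends up displayed as U+FFFD.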
*/ if (!vc->vc_utf_count) goto bad_sequence; vc->vc_utf_char = (vc->vc_utf_char << 6) | (c & 0x3f); vc->vc_npar++; if (--vc->vc_utf_count) goto need_more_bytes; /* Got a whole character */ c = vc->vc_utf_char; /* Reject overlong sequences */ if (c <= utf8_length_changes[vc->vc_npar - 1] || c > utf8_length_changes[vc->vc_npar]) goto bad_sequence; return vc_sanitize_unicode(c); } /* Single ASCII byte or first byte of a sequence received */ if (vc->vc_utf_count) { /* A continuation byte was expected */ *rescan = true; vc->vc_utf_count = 0; goto bad_sequence; } /* Nothing to do if an ASCII byte was received */ if (c <= 0x7f) return c; /* First byte of a multibyte sequence received */ vc->vc_npar = 0; if ((c & 0xe0) == 0xc0) { vc->vc_utf_count = 1; vc->vc_utf_char = (c & 0x1f); } else if ((c & 0xf0) == 0xe0) { vc->vc_utf_count = 2; vc->vc_utf_char = (c & 0x0f); } else if ((c & 0xf8) == 0xf0) { vc->vc_utf_count = 3; vc->vc_utf_char = (c & 0x07); } else { goto bad_sequence; } need_more_bytes: return -1; bad_sequence: return 0xfffd; } static int vc_translate(struct vc_data *vc, int *c, bool *rescan) { /* Do no translation at all in control states */ if (vc->vc_state != ESnormal) return *c; if (vc->vc_utf && !vc->vc_disp_ctrl) return *c = vc_translate_unicode(vc, *c, rescan); /* no utf or alternate charset mode */ return vc_translate_ascii(vc, *c); } static inline unsigned char vc_invert_attr(const struct vc_data *vc) { if (!vc->vc_can_do_color) return vc->vc_attr ^ 0x08; if (vc->vc_hi_font_mask == 0x100) return (vc->vc_attr & 0x11) | ((vc->vc_attr & 0xe0) >> 4) | ((vc->vc_attr & 0x0e) << 4); return (vc->vc_attr & 0x88) | ((vc->vc_attr & 0x70) >> 4) | ((vc->vc_attr & 0x07) << 4); } static bool vc_is_control(struct vc_data *vc, int tc, int c) { /* * A bitmap for codes <32. A bit of 1 indicates that the code * corresponding to that bit number invokes some special action (such * as cursor movement) and should not be displayed as a glyph unless * the disp_ctrl mode is explicitly enabled. */ static const u32 CTRL_ACTION = BIT(ASCII_NULL) | GENMASK(ASCII_SHIFTIN, ASCII_BELL) | BIT(ASCII_CANCEL) | BIT(ASCII_SUBSTITUTE) | BIT(ASCII_ESCAPE); /* Cannot be overridden by disp_ctrl */ static const u32 CTRL_ALWAYS = BIT(ASCII_NULL) | BIT(ASCII_BACKSPACE) | BIT(ASCII_LINEFEED) | BIT(ASCII_SHIFTIN) | BIT(ASCII_SHIFTOUT) | BIT(ASCII_CAR_RET) | BIT(ASCII_FORMFEED) | BIT(ASCII_ESCAPE); if (vc->vc_state != ESnormal) return true; if (!tc) return true; /* * If the original code was a control character we only allow a glyph * to be displayed if the code is not normally used (such as for cursor * movement) or if the disp_ctrl mode has been explicitly enabled. * Certain characters (as given by the CTRL_ALWAYS bitmap) are always * displayed as control characters, as the console would be pretty * useless without them; to display an arbitrary font position use the * direct-to-font zone in UTF-8 mode. 
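* For example, BEL (0x07) is in CTRL_ACTION but not in CTRL_ALWAYS, so with * disp_ctrl enabled it is rendered as a glyph instead of sounding the bell.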
*/ if (c < BITS_PER_TYPE(CTRL_ALWAYS)) { if (vc->vc_disp_ctrl) return CTRL_ALWAYS & BIT(c); else return vc->vc_utf || (CTRL_ACTION & BIT(c)); } if (c == ASCII_DEL && !vc->vc_disp_ctrl) return true; if (c == ASCII_EXT_CSI) return true; return false; } static void vc_con_rewind(struct vc_data *vc) { if (vc->state.x && !vc->vc_need_wrap) { vc->vc_pos -= 2; vc->state.x--; } vc->vc_need_wrap = 0; } #define UCS_ZWS 0x200b /* Zero Width Space */ #define UCS_VS16 0xfe0f /* Variation Selector 16 */ #define UCS_REPLACEMENT 0xfffd /* Replacement Character */ static int vc_process_ucs(struct vc_data *vc, int *c, int *tc) { u32 prev_c, curr_c = *c; if (ucs_is_double_width(curr_c)) { /* * The Unicode screen memory is allocated only when * required. This is one such case as we need to remember * which displayed characters are double-width. */ vc_uniscr_check(vc); return 2; } if (!ucs_is_zero_width(curr_c)) return 1; /* From here curr_c is known to be zero-width. */ if (ucs_is_double_width(vc_uniscr_getc(vc, -2))) { /* * Let's merge this zero-width code point with the preceding * double-width code point by replacing the existing * zero-width space padding. To do so we rewind one column * and pretend this has a width of 1. * We give the legacy display the same initial space padding. */ vc_con_rewind(vc); *tc = ' '; return 1; } /* From here the preceding character, if any, must be single-width. */ prev_c = vc_uniscr_getc(vc, -1); if (curr_c == UCS_VS16 && prev_c != 0) { /* * VS16 (U+FE0F) is special. It typically turns the preceding * single-width character into a double-width one. Let it * have a width of 1 effectively making the combination with * the preceding character double-width. */ *tc = ' '; return 1; } /* try recomposition */ prev_c = ucs_recompose(prev_c, curr_c); if (prev_c != 0) { vc_con_rewind(vc); *tc = *c = prev_c; return 1; } /* Otherwise zero-width code points are ignored. */ return 0; } static int vc_get_glyph(struct vc_data *vc, int tc) { int glyph = conv_uni_to_pc(vc, tc); u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; if (!(glyph & ~charmask)) return glyph; if (glyph == -1) return -1; /* nothing to display */ /* Glyph not found */ if ((!vc->vc_utf || vc->vc_disp_ctrl || tc < 128) && !(tc & ~charmask)) { /* * In legacy mode use the glyph we get by a 1:1 mapping. * This would make absolutely no sense with Unicode in mind, but do this for * ASCII characters since a font may lack Unicode mapping info and we don't * want to end up with having question marks only. */ return tc; } /* * The Unicode screen memory is allocated only when required. * This is one such case: we're about to "cheat" with the displayed * character meaning the simple screen buffer won't hold the original * information, whereas the Unicode screen buffer always does. */ vc_uniscr_check(vc); /* Try getting a simpler fallback character. */ tc = ucs_get_fallback(tc); if (tc) return vc_get_glyph(vc, tc); /* Display U+FFFD (Unicode Replacement Character). 
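If even U+FFFD has no glyph in the loaded font, conv_uni_to_pc() returns a negative value and vc_con_write_normal() then falls back to an inverted '?' or displays nothing.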
*/ return conv_uni_to_pc(vc, UCS_REPLACEMENT); } static int vc_con_write_normal(struct vc_data *vc, int tc, int c, struct vc_draw_region *draw) { int next_c; unsigned char vc_attr = vc->vc_attr; u16 himask = vc->vc_hi_font_mask; u8 width = 1; bool inverse = false; if (vc->vc_utf && !vc->vc_disp_ctrl) { width = vc_process_ucs(vc, &c, &tc); if (!width) goto out; } /* Now try to find out how to display it */ tc = vc_get_glyph(vc, tc); if (tc == -1) return -1; /* nothing to display */ if (tc < 0) { inverse = true; tc = conv_uni_to_pc(vc, '?'); if (tc < 0) tc = '?'; vc_attr = vc_invert_attr(vc); con_flush(vc, draw); } next_c = c; while (1) { if (vc->vc_need_wrap || vc->vc_decim) con_flush(vc, draw); if (vc->vc_need_wrap) { cr(vc); lf(vc); } if (vc->vc_decim) insert_char(vc, 1); vc_uniscr_putc(vc, next_c); if (himask) tc = ((tc & 0x100) ? himask : 0) | (tc & 0xff); tc |= (vc_attr << 8) & ~himask; scr_writew(tc, (u16 *)vc->vc_pos); if (con_should_update(vc) && draw->x < 0) { draw->x = vc->state.x; draw->from = vc->vc_pos; } if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = vc->vc_decawm; draw->to = vc->vc_pos + 2; } else { vc->state.x++; draw->to = (vc->vc_pos += 2); } if (!--width) break; /* A space is printed in the second column */ tc = conv_uni_to_pc(vc, ' '); if (tc < 0) tc = ' '; /* * Store a zero-width space in the Unicode screen given that * the previous code point is semantically double width. */ next_c = UCS_ZWS; } out: notify_write(vc, c); if (inverse) con_flush(vc, draw); return 0; } /* acquires console_lock */ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count) { struct vc_draw_region draw = { .x = -1, }; int c, tc, n = 0; unsigned int currcons; struct vc_data *vc = tty->driver_data; struct vt_notifier_param param; bool rescan; if (in_interrupt()) return count; console_lock(); currcons = vc->vc_num; if (!vc_cons_allocated(currcons)) { /* could this happen? */ pr_warn_once("con_write: tty %d not allocated\n", currcons+1); console_unlock(); return 0; } /* undraw cursor first */ if (con_is_fg(vc)) hide_cursor(vc); param.vc = vc; while (!tty->flow.stopped && count) { u8 orig = *buf; buf++; n++; count--; rescan_last_byte: c = orig; rescan = false; tc = vc_translate(vc, &c, &rescan); if (tc == -1) continue; param.c = tc; if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE, ¶m) == NOTIFY_STOP) continue; if (vc_is_control(vc, tc, c)) { con_flush(vc, &draw); do_con_trol(tty, vc, orig); continue; } if (vc_con_write_normal(vc, tc, c, &draw) < 0) continue; if (rescan) goto rescan_last_byte; } con_flush(vc, &draw); console_conditional_schedule(); notify_update(vc); console_unlock(); return n; } /* * This is the console switching callback. * * Doing console switching in a process context allows * us to do the switches asynchronously (needed when we want * to switch due to a keyboard interrupt). Synchronization * with other console code and prevention of re-entrancy is * ensured with console_lock. 
*/ static void console_callback(struct work_struct *ignored) { console_lock(); if (want_console >= 0) { if (want_console != fg_console && vc_cons_allocated(want_console)) { hide_cursor(vc_cons[fg_console].d); change_console(vc_cons[want_console].d); /* we only changed when the console had already been allocated - a new console is not created in an interrupt routine */ } want_console = -1; } if (do_poke_blanked_console) { /* do not unblank for a LED change */ do_poke_blanked_console = 0; poke_blanked_console(); } if (scrollback_delta) { struct vc_data *vc = vc_cons[fg_console].d; clear_selection(); if (vc->vc_mode == KD_TEXT && vc->vc_sw->con_scrolldelta) vc->vc_sw->con_scrolldelta(vc, scrollback_delta); scrollback_delta = 0; } if (blank_timer_expired) { do_blank_screen(0); blank_timer_expired = 0; } notify_update(vc_cons[fg_console].d); console_unlock(); } int set_console(int nr) { struct vc_data *vc = vc_cons[fg_console].d; if (!vc_cons_allocated(nr) || vt_dont_switch || (vc->vt_mode.mode == VT_AUTO && vc->vc_mode == KD_GRAPHICS)) { /* * Console switch will fail in console_callback() or * change_console() so there is no point scheduling * the callback * * Existing set_console() users don't check the return * value so this shouldn't break anything */ return -EINVAL; } want_console = nr; schedule_console_callback(); return 0; } struct tty_driver *console_driver; #ifdef CONFIG_VT_CONSOLE /** * vt_kmsg_redirect() - sets/gets the kernel message console * @new: the new virtual terminal number or -1 if the console should stay * unchanged * * By default, the kernel messages are always printed on the current virtual * console. However, the user may modify that default with the * %TIOCL_SETKMSGREDIRECT ioctl call. * * This function sets the kernel message console to be @new. It returns the old * virtual console number. The virtual terminal number %0 (both as parameter and * return value) means no redirection (i.e. always printed on the currently * active console). * * The parameter -1 means that only the current console is returned, but the * value is not modified. You may use the macro vt_get_kmsg_redirect() in that * case to make the code more understandable. * * When the kernel is compiled without %CONFIG_VT_CONSOLE, this function ignores * the parameter and always returns %0. */ int vt_kmsg_redirect(int new) { static int kmsg_con; if (new != -1) return xchg(&kmsg_con, new); else return kmsg_con; } /* * Console on virtual terminal * * The console must be locked when we get here. 
*/ static void vt_console_print(struct console *co, const char *b, unsigned count) { struct vc_data *vc = vc_cons[fg_console].d; unsigned char c; static DEFINE_SPINLOCK(printing_lock); const ushort *start; ushort start_x, cnt; int kmsg_console; WARN_CONSOLE_UNLOCKED(); /* this protects against concurrent oops only */ if (!spin_trylock(&printing_lock)) return; kmsg_console = vt_get_kmsg_redirect(); if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) vc = vc_cons[kmsg_console - 1].d; if (!vc_cons_allocated(fg_console)) { /* impossible */ /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ goto quit; } if (vc->vc_mode != KD_TEXT) goto quit; /* undraw cursor first */ if (con_is_fg(vc)) hide_cursor(vc); start = (ushort *)vc->vc_pos; start_x = vc->state.x; cnt = 0; while (count--) { c = *b++; if (c == ASCII_LINEFEED || c == ASCII_CAR_RET || c == ASCII_BACKSPACE || vc->vc_need_wrap) { if (cnt && con_is_visible(vc)) vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); cnt = 0; if (c == ASCII_BACKSPACE) { bs(vc); start = (ushort *)vc->vc_pos; start_x = vc->state.x; continue; } if (c != ASCII_CAR_RET) lf(vc); cr(vc); start = (ushort *)vc->vc_pos; start_x = vc->state.x; if (c == ASCII_LINEFEED || c == ASCII_CAR_RET) continue; } vc_uniscr_putc(vc, c); scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); notify_write(vc, c); cnt++; if (vc->state.x == vc->vc_cols - 1) { vc->vc_need_wrap = 1; } else { vc->vc_pos += 2; vc->state.x++; } } if (cnt && con_is_visible(vc)) vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x); set_cursor(vc); notify_update(vc); quit: spin_unlock(&printing_lock); } static struct tty_driver *vt_console_device(struct console *c, int *index) { *index = c->index ? c->index-1 : fg_console; return console_driver; } static int vt_console_setup(struct console *co, char *options) { return co->index >= MAX_NR_CONSOLES ? -EINVAL : 0; } static struct console vt_console_driver = { .name = "tty", .setup = vt_console_setup, .write = vt_console_print, .device = vt_console_device, .unblank = unblank_screen, .flags = CON_PRINTBUFFER, .index = -1, }; #endif /* * Handling of Linux-specific VC ioctls */ /* * Generally a bit racy with respect to console_lock();. * * There are some functions which don't need it. * * There are some functions which can sleep for arbitrary periods * (paste_selection) but we don't need the lock there anyway. * * set_selection_user has locking, and definitely needs it */ int tioclinux(struct tty_struct *tty, unsigned long arg) { char type, data; char __user *p = (char __user *)arg; void __user *param_aligned32 = (u32 __user *)arg + 1; void __user *param = (void __user *)arg + 1; int lines; int ret; if (current->signal->tty != tty && !capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(type, p)) return -EFAULT; ret = 0; switch (type) { case TIOCL_SETSEL: return set_selection_user(param, tty); case TIOCL_PASTESEL: if (!capable(CAP_SYS_ADMIN)) return -EPERM; return paste_selection(tty); case TIOCL_UNBLANKSCREEN: console_lock(); unblank_screen(); console_unlock(); break; case TIOCL_SELLOADLUT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; return sel_loadlut(param_aligned32); case TIOCL_GETSHIFTSTATE: /* * Make it possible to react to Shift+Mousebutton. Note that * 'shift_state' is an undocumented kernel-internal variable; * programs not closely related to the kernel should not use * this. 
*/ data = vt_get_shift_state(); return put_user(data, p); case TIOCL_GETMOUSEREPORTING: console_lock(); /* May be overkill */ data = mouse_reporting(); console_unlock(); return put_user(data, p); case TIOCL_SETVESABLANK: return set_vesa_blanking(param); case TIOCL_GETKMSGREDIRECT: data = vt_get_kmsg_redirect(); return put_user(data, p); case TIOCL_SETKMSGREDIRECT: if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(data, p+1)) return -EFAULT; vt_kmsg_redirect(data); break; case TIOCL_GETFGCONSOLE: /* * No locking needed as this is a transiently correct return * anyway if the caller hasn't disabled switching. */ return fg_console; case TIOCL_SCROLLCONSOLE: if (get_user(lines, (s32 __user *)param_aligned32)) return -EFAULT; /* * Needs the console lock here. Note that lots of other calls * need fixing before the lock is actually useful! */ console_lock(); scrollfront(vc_cons[fg_console].d, lines); console_unlock(); break; case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */ console_lock(); ignore_poke = 1; do_blank_screen(0); console_unlock(); break; case TIOCL_BLANKEDSCREEN: return console_blanked; case TIOCL_GETBRACKETEDPASTE: return get_bracketed_paste(tty); default: return -EINVAL; } return ret; } /* * /dev/ttyN handling */ static ssize_t con_write(struct tty_struct *tty, const u8 *buf, size_t count) { int retval; retval = do_con_write(tty, buf, count); con_flush_chars(tty); return retval; } static int con_put_char(struct tty_struct *tty, u8 ch) { return do_con_write(tty, &ch, 1); } static unsigned int con_write_room(struct tty_struct *tty) { if (tty->flow.stopped) return 0; return 32768; /* No limit, really; we're not buffering */ } /* * con_throttle and con_unthrottle are only used for * paste_selection(), which has to stuff in a large number of * characters... */ static void con_throttle(struct tty_struct *tty) { } static void con_unthrottle(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; wake_up_interruptible(&vc->paste_wait); } /* * Turn the Scroll-Lock LED on when the tty is stopped */ static void con_stop(struct tty_struct *tty) { int console_num; if (!tty) return; console_num = tty->index; if (!vc_cons_allocated(console_num)) return; vt_kbd_con_stop(console_num); } /* * Turn the Scroll-Lock LED off when the console is started */ static void con_start(struct tty_struct *tty) { int console_num; if (!tty) return; console_num = tty->index; if (!vc_cons_allocated(console_num)) return; vt_kbd_con_start(console_num); } static void con_flush_chars(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; if (in_interrupt()) /* from flush_to_ldisc */ return; console_lock(); set_cursor(vc); console_unlock(); } /* * Allocate the console screen memory. 
*/ static int con_install(struct tty_driver *driver, struct tty_struct *tty) { unsigned int currcons = tty->index; struct vc_data *vc; int ret; console_lock(); ret = vc_allocate(currcons); if (ret) goto unlock; vc = vc_cons[currcons].d; /* Still being freed */ if (vc->port.tty) { ret = -ERESTARTSYS; goto unlock; } ret = tty_port_install(&vc->port, driver, tty); if (ret) goto unlock; tty->driver_data = vc; vc->port.tty = tty; tty_port_get(&vc->port); if (!tty->winsize.ws_row && !tty->winsize.ws_col) { tty->winsize.ws_row = vc_cons[currcons].d->vc_rows; tty->winsize.ws_col = vc_cons[currcons].d->vc_cols; } if (vc->vc_utf) tty->termios.c_iflag |= IUTF8; else tty->termios.c_iflag &= ~IUTF8; unlock: console_unlock(); return ret; } static int con_open(struct tty_struct *tty, struct file *filp) { /* everything done in install */ return 0; } static void con_close(struct tty_struct *tty, struct file *filp) { /* Nothing to do - we defer to shutdown */ } static void con_shutdown(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; BUG_ON(vc == NULL); console_lock(); vc->port.tty = NULL; console_unlock(); } static void con_cleanup(struct tty_struct *tty) { struct vc_data *vc = tty->driver_data; tty_port_put(&vc->port); } /* * We can't deal with anything but the N_TTY ldisc, * because we can sleep in our write() routine. */ static int con_ldisc_ok(struct tty_struct *tty, int ldisc) { return ldisc == N_TTY ? 0 : -EINVAL; } static int default_color = 7; /* white */ static int default_italic_color = 2; // green (ASCII) static int default_underline_color = 3; // cyan (ASCII) module_param_named(color, default_color, int, S_IRUGO | S_IWUSR); module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR); module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR); static void vc_init(struct vc_data *vc, int do_clear) { int j, k ; set_origin(vc); vc->vc_pos = vc->vc_origin; reset_vc(vc); for (j=k=0; j<16; j++) { vc->vc_palette[k++] = default_red[j] ; vc->vc_palette[k++] = default_grn[j] ; vc->vc_palette[k++] = default_blu[j] ; } vc->vc_def_color = default_color; vc->vc_ulcolor = default_underline_color; vc->vc_itcolor = default_italic_color; vc->vc_halfcolor = 0x08; /* grey */ init_waitqueue_head(&vc->paste_wait); reset_terminal(vc, do_clear); } /* * This routine initializes console interrupts, and does nothing * else. If you want the screen to clear, call tty_write with * the appropriate escape-sequence. */ static int __init con_init(void) { const char *display_desc = NULL; struct vc_data *vc; unsigned int currcons = 0, i; console_lock(); if (!conswitchp) conswitchp = &dummy_con; display_desc = conswitchp->con_startup(); if (!display_desc) { fg_console = 0; console_unlock(); return 0; } for (i = 0; i < MAX_NR_CON_DRIVER; i++) { struct con_driver *con_driver = ®istered_con_driver[i]; if (con_driver->con == NULL) { con_driver->con = conswitchp; con_driver->desc = display_desc; con_driver->flag = CON_DRIVER_FLAG_INIT; con_driver->first = 0; con_driver->last = MAX_NR_CONSOLES - 1; break; } } for (i = 0; i < MAX_NR_CONSOLES; i++) con_driver_map[i] = conswitchp; if (blankinterval) { blank_state = blank_normal_wait; mod_timer(&console_timer, jiffies + (blankinterval * HZ)); } for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) { vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT); INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); tty_port_init(&vc->port); visual_init(vc, currcons, true); /* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. 
*/ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT); vc_init(vc, currcons || !vc->vc_sw->con_save_screen); } currcons = fg_console = 0; master_display_fg = vc = vc_cons[currcons].d; set_origin(vc); save_screen(vc); gotoxy(vc, vc->state.x, vc->state.y); csi_J(vc, CSI_J_CURSOR_TO_END); update_screen(vc); pr_info("Console: %s %s %dx%d\n", vc->vc_can_do_color ? "colour" : "mono", display_desc, vc->vc_cols, vc->vc_rows); console_unlock(); #ifdef CONFIG_VT_CONSOLE register_console(&vt_console_driver); #endif return 0; } console_initcall(con_init); static const struct tty_operations con_ops = { .install = con_install, .open = con_open, .close = con_close, .write = con_write, .write_room = con_write_room, .put_char = con_put_char, .flush_chars = con_flush_chars, .ioctl = vt_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vt_compat_ioctl, #endif .stop = con_stop, .start = con_start, .throttle = con_throttle, .unthrottle = con_unthrottle, .resize = vt_resize, .shutdown = con_shutdown, .cleanup = con_cleanup, .ldisc_ok = con_ldisc_ok, }; static struct cdev vc0_cdev; static ssize_t show_tty_active(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "tty%d\n", fg_console + 1); } static DEVICE_ATTR(active, S_IRUGO, show_tty_active, NULL); static struct attribute *vt_dev_attrs[] = { &dev_attr_active.attr, NULL }; ATTRIBUTE_GROUPS(vt_dev); int __init vty_init(const struct file_operations *console_fops) { cdev_init(&vc0_cdev, console_fops); if (cdev_add(&vc0_cdev, MKDEV(TTY_MAJOR, 0), 1) || register_chrdev_region(MKDEV(TTY_MAJOR, 0), 1, "/dev/vc/0") < 0) panic("Couldn't register /dev/tty0 driver\n"); tty0dev = device_create_with_groups(&tty_class, NULL, MKDEV(TTY_MAJOR, 0), NULL, vt_dev_groups, "tty0"); if (IS_ERR(tty0dev)) tty0dev = NULL; vcs_init(); console_driver = tty_alloc_driver(MAX_NR_CONSOLES, TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS); if (IS_ERR(console_driver)) panic("Couldn't allocate console driver\n"); console_driver->name = "tty"; console_driver->name_base = 1; console_driver->major = TTY_MAJOR; console_driver->minor_start = 1; console_driver->type = TTY_DRIVER_TYPE_CONSOLE; console_driver->init_termios = tty_std_termios; if (default_utf8) console_driver->init_termios.c_iflag |= IUTF8; tty_set_operations(console_driver, &con_ops); if (tty_register_driver(console_driver)) panic("Couldn't register console driver\n"); kbd_init(); console_map_init(); #ifdef CONFIG_MDA_CONSOLE mda_console_init(); #endif return 0; } static const struct class vtconsole_class = { .name = "vtconsole", }; static int do_bind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; const char *desc = NULL; struct con_driver *con_driver; int i, j = -1, k = -1, retval = -ENODEV; if (!try_module_get(owner)) return -ENODEV; WARN_CONSOLE_UNLOCKED(); /* check if driver is registered */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = ®istered_con_driver[i]; if (con_driver->con == csw) { desc = con_driver->desc; retval = 0; break; } } if (retval) goto err; if (!(con_driver->flag & CON_DRIVER_FLAG_INIT)) { csw->con_startup(); con_driver->flag |= CON_DRIVER_FLAG_INIT; } if (deflt) { if (conswitchp) module_put(conswitchp->owner); __module_get(owner); conswitchp = csw; } first = max(first, con_driver->first); last = min(last, con_driver->last); for (i = first; i <= last; i++) { int old_was_color; struct vc_data *vc = vc_cons[i].d; if (con_driver_map[i]) module_put(con_driver_map[i]->owner); __module_get(owner); con_driver_map[i] = 
csw; if (!vc || !vc->vc_sw) continue; j = i; if (con_is_visible(vc)) { k = i; save_screen(vc); } old_was_color = vc->vc_can_do_color; vc->vc_sw->con_deinit(vc); vc->vc_origin = (unsigned long)vc->vc_screenbuf; visual_init(vc, i, false); set_origin(vc); update_attr(vc); /* If the console changed between mono <-> color, then * the attributes in the screenbuf will be wrong. The * following resets all attributes to something sane. */ if (old_was_color != vc->vc_can_do_color) clear_buffer_attributes(vc); } pr_info("Console: switching "); if (!deflt) pr_cont("consoles %d-%d ", first + 1, last + 1); if (j >= 0) { struct vc_data *vc = vc_cons[j].d; pr_cont("to %s %s %dx%d\n", vc->vc_can_do_color ? "colour" : "mono", desc, vc->vc_cols, vc->vc_rows); if (k >= 0) { vc = vc_cons[k].d; update_screen(vc); } } else { pr_cont("to %s\n", desc); } retval = 0; err: module_put(owner); return retval; }; #ifdef CONFIG_VT_HW_CONSOLE_BINDING int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt) { struct module *owner = csw->owner; const struct consw *defcsw = NULL; struct con_driver *con_driver = NULL, *con_back = NULL; int i, retval = -ENODEV; if (!try_module_get(owner)) return -ENODEV; WARN_CONSOLE_UNLOCKED(); /* check if driver is registered and if it is unbindable */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = ®istered_con_driver[i]; if (con_driver->con == csw && con_driver->flag & CON_DRIVER_FLAG_MODULE) { retval = 0; break; } } if (retval) goto err; retval = -ENODEV; /* check if backup driver exists */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_back = ®istered_con_driver[i]; if (con_back->con && con_back->con != csw) { defcsw = con_back->con; retval = 0; break; } } if (retval) goto err; if (!con_is_bound(csw)) goto err; first = max(first, con_driver->first); last = min(last, con_driver->last); for (i = first; i <= last; i++) { if (con_driver_map[i] == csw) { module_put(csw->owner); con_driver_map[i] = NULL; } } if (!con_is_bound(defcsw)) { const struct consw *defconsw = conswitchp; defcsw->con_startup(); con_back->flag |= CON_DRIVER_FLAG_INIT; /* * vgacon may change the default driver to point * to dummycon, we restore it here... 
*/ conswitchp = defconsw; } if (!con_is_bound(csw)) con_driver->flag &= ~CON_DRIVER_FLAG_INIT; /* ignore return value, binding should not fail */ do_bind_con_driver(defcsw, first, last, deflt); err: module_put(owner); return retval; } EXPORT_SYMBOL_GPL(do_unbind_con_driver); static int vt_bind(struct con_driver *con) { const struct consw *defcsw = NULL, *csw = NULL; int i, more = 1, first = -1, last = -1, deflt = 0; if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE)) goto err; csw = con->con; for (i = 0; i < MAX_NR_CON_DRIVER; i++) { struct con_driver *con = ®istered_con_driver[i]; if (con->con && !(con->flag & CON_DRIVER_FLAG_MODULE)) { defcsw = con->con; break; } } if (!defcsw) goto err; while (more) { more = 0; for (i = con->first; i <= con->last; i++) { if (con_driver_map[i] == defcsw) { if (first == -1) first = i; last = i; more = 1; } else if (first != -1) break; } if (first == 0 && last == MAX_NR_CONSOLES -1) deflt = 1; if (first != -1) do_bind_con_driver(csw, first, last, deflt); first = -1; last = -1; deflt = 0; } err: return 0; } static int vt_unbind(struct con_driver *con) { const struct consw *csw = NULL; int i, more = 1, first = -1, last = -1, deflt = 0; int ret; if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE)) goto err; csw = con->con; while (more) { more = 0; for (i = con->first; i <= con->last; i++) { if (con_driver_map[i] == csw) { if (first == -1) first = i; last = i; more = 1; } else if (first != -1) break; } if (first == 0 && last == MAX_NR_CONSOLES -1) deflt = 1; if (first != -1) { ret = do_unbind_con_driver(csw, first, last, deflt); if (ret != 0) return ret; } first = -1; last = -1; deflt = 0; } err: return 0; } #else static inline int vt_bind(struct con_driver *con) { return 0; } static inline int vt_unbind(struct con_driver *con) { return 0; } #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ static ssize_t store_bind(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct con_driver *con = dev_get_drvdata(dev); int bind = simple_strtoul(buf, NULL, 0); console_lock(); if (bind) vt_bind(con); else vt_unbind(con); console_unlock(); return count; } static ssize_t show_bind(struct device *dev, struct device_attribute *attr, char *buf) { struct con_driver *con = dev_get_drvdata(dev); int bind; console_lock(); bind = con_is_bound(con->con); console_unlock(); return sysfs_emit(buf, "%i\n", bind); } static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct con_driver *con = dev_get_drvdata(dev); return sysfs_emit(buf, "%s %s\n", (con->flag & CON_DRIVER_FLAG_MODULE) ? 
"(M)" : "(S)", con->desc); } static DEVICE_ATTR(bind, S_IRUGO|S_IWUSR, show_bind, store_bind); static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static struct attribute *con_dev_attrs[] = { &dev_attr_bind.attr, &dev_attr_name.attr, NULL }; ATTRIBUTE_GROUPS(con_dev); static int vtconsole_init_device(struct con_driver *con) { con->flag |= CON_DRIVER_FLAG_ATTR; return 0; } static void vtconsole_deinit_device(struct con_driver *con) { con->flag &= ~CON_DRIVER_FLAG_ATTR; } /** * con_is_bound - checks if driver is bound to the console * @csw: console driver * * RETURNS: zero if unbound, nonzero if bound * * Drivers can call this and if zero, they should release * all resources allocated on &consw.con_startup() */ int con_is_bound(const struct consw *csw) { int i, bound = 0; WARN_CONSOLE_UNLOCKED(); for (i = 0; i < MAX_NR_CONSOLES; i++) { if (con_driver_map[i] == csw) { bound = 1; break; } } return bound; } EXPORT_SYMBOL(con_is_bound); /** * con_is_visible - checks whether the current console is visible * @vc: virtual console * * RETURNS: zero if not visible, nonzero if visible */ bool con_is_visible(const struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); return *vc->vc_display_fg == vc; } EXPORT_SYMBOL(con_is_visible); /** * con_debug_enter - prepare the console for the kernel debugger * @vc: virtual console * * Called when the console is taken over by the kernel debugger, this * function needs to save the current console state, then put the console * into a state suitable for the kernel debugger. */ void con_debug_enter(struct vc_data *vc) { saved_fg_console = fg_console; saved_last_console = last_console; saved_want_console = want_console; saved_vc_mode = vc->vc_mode; saved_console_blanked = console_blanked; vc->vc_mode = KD_TEXT; console_blanked = 0; if (vc->vc_sw->con_debug_enter) vc->vc_sw->con_debug_enter(vc); #ifdef CONFIG_KGDB_KDB /* Set the initial LINES variable if it is not already set */ if (vc->vc_rows < 999) { int linecount; char lns[4]; const char *setargs[3] = { "set", "LINES", lns, }; if (kdbgetintenv(setargs[0], &linecount)) { snprintf(lns, 4, "%i", vc->vc_rows); kdb_set(2, setargs); } } if (vc->vc_cols < 999) { int colcount; char cols[4]; const char *setargs[3] = { "set", "COLUMNS", cols, }; if (kdbgetintenv(setargs[0], &colcount)) { snprintf(cols, 4, "%i", vc->vc_cols); kdb_set(2, setargs); } } #endif /* CONFIG_KGDB_KDB */ } EXPORT_SYMBOL_GPL(con_debug_enter); /** * con_debug_leave - restore console state * * Restore the console state to what it was before the kernel debugger * was invoked. 
*/ void con_debug_leave(void) { struct vc_data *vc; fg_console = saved_fg_console; last_console = saved_last_console; want_console = saved_want_console; console_blanked = saved_console_blanked; vc_cons[fg_console].d->vc_mode = saved_vc_mode; vc = vc_cons[fg_console].d; if (vc->vc_sw->con_debug_leave) vc->vc_sw->con_debug_leave(vc); } EXPORT_SYMBOL_GPL(con_debug_leave); static int do_register_con_driver(const struct consw *csw, int first, int last) { struct module *owner = csw->owner; struct con_driver *con_driver; const char *desc; int i, retval; WARN_CONSOLE_UNLOCKED(); if (!try_module_get(owner)) return -ENODEV; for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = ®istered_con_driver[i]; /* already registered */ if (con_driver->con == csw) { retval = -EBUSY; goto err; } } desc = csw->con_startup(); if (!desc) { retval = -ENODEV; goto err; } retval = -EINVAL; for (i = 0; i < MAX_NR_CON_DRIVER; i++) { con_driver = ®istered_con_driver[i]; if (con_driver->con == NULL && !(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE)) { con_driver->con = csw; con_driver->desc = desc; con_driver->node = i; con_driver->flag = CON_DRIVER_FLAG_MODULE | CON_DRIVER_FLAG_INIT; con_driver->first = first; con_driver->last = last; retval = 0; break; } } if (retval) goto err; con_driver->dev = device_create_with_groups(&vtconsole_class, NULL, MKDEV(0, con_driver->node), con_driver, con_dev_groups, "vtcon%i", con_driver->node); if (IS_ERR(con_driver->dev)) { pr_warn("Unable to create device for %s; errno = %ld\n", con_driver->desc, PTR_ERR(con_driver->dev)); con_driver->dev = NULL; } else { vtconsole_init_device(con_driver); } err: module_put(owner); return retval; } /** * do_unregister_con_driver - unregister console driver from console layer * @csw: console driver * * DESCRIPTION: All drivers that registers to the console layer must * call this function upon exit, or if the console driver is in a state * where it won't be able to handle console services, such as the * framebuffer console without loaded framebuffer drivers. * * The driver must unbind first prior to unregistration. */ int do_unregister_con_driver(const struct consw *csw) { int i; /* cannot unregister a bound driver */ if (con_is_bound(csw)) return -EBUSY; if (csw == conswitchp) return -EINVAL; for (i = 0; i < MAX_NR_CON_DRIVER; i++) { struct con_driver *con_driver = ®istered_con_driver[i]; if (con_driver->con == csw) { /* * Defer the removal of the sysfs entries since that * will acquire the kernfs s_active lock and we can't * acquire this lock while holding the console lock: * the unbind sysfs entry imposes already the opposite * order. Reset con already here to prevent any later * lookup to succeed and mark this slot as zombie, so * it won't get reused until we complete the removal * in the deferred work. 
*/ con_driver->con = NULL; con_driver->flag = CON_DRIVER_FLAG_ZOMBIE; schedule_work(&con_driver_unregister_work); return 0; } } return -ENODEV; } EXPORT_SYMBOL_GPL(do_unregister_con_driver); static void con_driver_unregister_callback(struct work_struct *ignored) { int i; console_lock(); for (i = 0; i < MAX_NR_CON_DRIVER; i++) { struct con_driver *con_driver = ®istered_con_driver[i]; if (!(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE)) continue; console_unlock(); vtconsole_deinit_device(con_driver); device_destroy(&vtconsole_class, MKDEV(0, con_driver->node)); console_lock(); if (WARN_ON_ONCE(con_driver->con)) con_driver->con = NULL; con_driver->desc = NULL; con_driver->dev = NULL; con_driver->node = 0; WARN_ON_ONCE(con_driver->flag != CON_DRIVER_FLAG_ZOMBIE); con_driver->flag = 0; con_driver->first = 0; con_driver->last = 0; } console_unlock(); } /* * If we support more console drivers, this function is used * when a driver wants to take over some existing consoles * and become default driver for newly opened ones. * * do_take_over_console is basically a register followed by bind */ int do_take_over_console(const struct consw *csw, int first, int last, int deflt) { int err; err = do_register_con_driver(csw, first, last); /* * If we get an busy error we still want to bind the console driver * and return success, as we may have unbound the console driver * but not unregistered it. */ if (err == -EBUSY) err = 0; if (!err) do_bind_con_driver(csw, first, last, deflt); return err; } EXPORT_SYMBOL_GPL(do_take_over_console); /* * give_up_console is a wrapper to unregister_con_driver. It will only * work if driver is fully unbound. */ void give_up_console(const struct consw *csw) { console_lock(); do_unregister_con_driver(csw); console_unlock(); } EXPORT_SYMBOL(give_up_console); static int __init vtconsole_class_init(void) { int i; i = class_register(&vtconsole_class); if (i) pr_warn("Unable to create vt console class; errno = %d\n", i); /* Add system drivers to sysfs */ for (i = 0; i < MAX_NR_CON_DRIVER; i++) { struct con_driver *con = ®istered_con_driver[i]; if (con->con && !con->dev) { con->dev = device_create_with_groups(&vtconsole_class, NULL, MKDEV(0, con->node), con, con_dev_groups, "vtcon%i", con->node); if (IS_ERR(con->dev)) { pr_warn("Unable to create device for %s; errno = %ld\n", con->desc, PTR_ERR(con->dev)); con->dev = NULL; } else { vtconsole_init_device(con); } } } return 0; } postcore_initcall(vtconsole_class_init); /* * Screen blanking */ static int set_vesa_blanking(u8 __user *mode_user) { u8 mode; if (get_user(mode, mode_user)) return -EFAULT; console_lock(); vesa_blank_mode = (mode <= VESA_BLANK_MAX) ? mode : VESA_NO_BLANKING; console_unlock(); return 0; } void do_blank_screen(int entering_gfx) { struct vc_data *vc = vc_cons[fg_console].d; int i; might_sleep(); WARN_CONSOLE_UNLOCKED(); if (console_blanked) { if (blank_state == blank_vesa_wait) { blank_state = blank_off; vc->vc_sw->con_blank(vc, vesa_blank_mode + 1, 0); } return; } /* entering graphics mode? */ if (entering_gfx) { hide_cursor(vc); save_screen(vc); vc->vc_sw->con_blank(vc, VESA_VSYNC_SUSPEND, 1); console_blanked = fg_console + 1; blank_state = blank_off; set_origin(vc); return; } blank_state = blank_off; /* don't blank graphics */ if (vc->vc_mode != KD_TEXT) { console_blanked = fg_console + 1; return; } hide_cursor(vc); timer_delete_sync(&console_timer); blank_timer_expired = 0; save_screen(vc); /* In case we need to reset origin, blanking hook returns 1 */ i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 
VESA_VSYNC_SUSPEND : (vesa_blank_mode + 1), 0); console_blanked = fg_console + 1; if (i) set_origin(vc); if (console_blank_hook && console_blank_hook(1)) return; if (vesa_off_interval && vesa_blank_mode) { blank_state = blank_vesa_wait; mod_timer(&console_timer, jiffies + vesa_off_interval); } vt_event_post(VT_EVENT_BLANK, vc->vc_num, vc->vc_num); } EXPORT_SYMBOL(do_blank_screen); /* * Called by timer as well as from vt_console_driver */ void do_unblank_screen(int leaving_gfx) { struct vc_data *vc; /* This should now always be called from a "sane" (read: can schedule) * context for the sake of the low level drivers, except in the special * case of oops_in_progress */ if (!oops_in_progress) might_sleep(); WARN_CONSOLE_UNLOCKED(); ignore_poke = 0; if (!console_blanked) return; if (!vc_cons_allocated(fg_console)) { /* impossible */ pr_warn("unblank_screen: tty %d not allocated ??\n", fg_console + 1); return; } vc = vc_cons[fg_console].d; if (vc->vc_mode != KD_TEXT) return; /* but leave console_blanked != 0 */ if (blankinterval) { mod_timer(&console_timer, jiffies + (blankinterval * HZ)); blank_state = blank_normal_wait; } console_blanked = 0; if (vc->vc_sw->con_blank(vc, VESA_NO_BLANKING, leaving_gfx)) /* Low-level driver cannot restore -> do it ourselves */ update_screen(vc); if (console_blank_hook) console_blank_hook(0); set_palette(vc); set_cursor(vc); vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num); notify_update(vc); } EXPORT_SYMBOL(do_unblank_screen); /* * This is called by the outside world to cause a forced unblank, mostly for * oopses. Currently, I just call do_unblank_screen(0), but we could eventually * call it with 1 as an argument and so force a mode restore... that may kill * X or at least garbage the screen but would also make the Oops visible... */ static void unblank_screen(void) { do_unblank_screen(0); } /* * We defer the timer blanking to work queue so it can take the console mutex * (console operations can still happen at irq time, but only from printk which * has the console mutex. Not perfect yet, but better than no locking */ static void blank_screen_t(struct timer_list *unused) { blank_timer_expired = 1; schedule_work(&console_work); } void poke_blanked_console(void) { WARN_CONSOLE_UNLOCKED(); /* Add this so we quickly catch whoever might call us in a non * safe context. Nowadays, unblank_screen() isn't to be called in * atomic contexts and is allowed to schedule (with the special case * of oops_in_progress, but that isn't of any concern for this * function. --BenH. */ might_sleep(); /* This isn't perfectly race free, but a race here would be mostly harmless, * at worst, we'll do a spurious blank and it's unlikely */ timer_delete(&console_timer); blank_timer_expired = 0; if (ignore_poke || !vc_cons[fg_console].d || vc_cons[fg_console].d->vc_mode == KD_GRAPHICS) return; if (console_blanked) unblank_screen(); else if (blankinterval) { mod_timer(&console_timer, jiffies + (blankinterval * HZ)); blank_state = blank_normal_wait; } } /* * Palettes */ static void set_palette(struct vc_data *vc) { WARN_CONSOLE_UNLOCKED(); if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_set_palette) vc->vc_sw->con_set_palette(vc, color_table); } /* * Load palette into the DAC registers. arg points to a colour * map, 3 bytes per colour, 16 colours, range from 0 to 255. 
*/ int con_set_cmap(unsigned char __user *arg) { int i, j, k; unsigned char colormap[3*16]; if (copy_from_user(colormap, arg, sizeof(colormap))) return -EFAULT; console_lock(); for (i = k = 0; i < 16; i++) { default_red[i] = colormap[k++]; default_grn[i] = colormap[k++]; default_blu[i] = colormap[k++]; } for (i = 0; i < MAX_NR_CONSOLES; i++) { if (!vc_cons_allocated(i)) continue; for (j = k = 0; j < 16; j++) { vc_cons[i].d->vc_palette[k++] = default_red[j]; vc_cons[i].d->vc_palette[k++] = default_grn[j]; vc_cons[i].d->vc_palette[k++] = default_blu[j]; } set_palette(vc_cons[i].d); } console_unlock(); return 0; } int con_get_cmap(unsigned char __user *arg) { int i, k; unsigned char colormap[3*16]; console_lock(); for (i = k = 0; i < 16; i++) { colormap[k++] = default_red[i]; colormap[k++] = default_grn[i]; colormap[k++] = default_blu[i]; } console_unlock(); if (copy_to_user(arg, colormap, sizeof(colormap))) return -EFAULT; return 0; } void reset_palette(struct vc_data *vc) { int j, k; for (j=k=0; j<16; j++) { vc->vc_palette[k++] = default_red[j]; vc->vc_palette[k++] = default_grn[j]; vc->vc_palette[k++] = default_blu[j]; } set_palette(vc); } /* * Font switching * * Currently we only support fonts up to 128 pixels wide, at a maximum height * of 128 pixels. Userspace fontdata may have to be stored with 32 bytes * (shorts/ints, depending on width) reserved for each character which is * kinda wasty, but this is done in order to maintain compatibility with the * EGA/VGA fonts. It is up to the actual low-level console-driver convert data * into its favorite format (maybe we should add a `fontoffset' field to the * `display' structure so we won't have to convert the fontdata all the time. * /Jes */ #define max_font_width 64 #define max_font_height 128 #define max_font_glyphs 512 #define max_font_size (max_font_glyphs*max_font_width*max_font_height) static int con_font_get(struct vc_data *vc, struct console_font_op *op) { struct console_font font; int rc = -EINVAL; int c; unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32; if (vpitch > max_font_height) return -EINVAL; if (op->data) { font.data = kvzalloc(max_font_size, GFP_KERNEL); if (!font.data) return -ENOMEM; } else font.data = NULL; console_lock(); if (vc->vc_mode != KD_TEXT) rc = -EINVAL; else if (vc->vc_sw->con_font_get) rc = vc->vc_sw->con_font_get(vc, &font, vpitch); else rc = -ENOSYS; console_unlock(); if (rc) goto out; c = (font.width+7)/8 * vpitch * font.charcount; if (op->data && font.charcount > op->charcount) rc = -ENOSPC; if (font.width > op->width || font.height > op->height) rc = -ENOSPC; if (rc) goto out; op->height = font.height; op->width = font.width; op->charcount = font.charcount; if (op->data && copy_to_user(op->data, font.data, c)) rc = -EFAULT; out: kvfree(font.data); return rc; } static int con_font_set(struct vc_data *vc, const struct console_font_op *op) { struct console_font font; int rc = -EINVAL; int size; unsigned int vpitch = op->op == KD_FONT_OP_SET_TALL ? 
op->height : 32; if (vc->vc_mode != KD_TEXT) return -EINVAL; if (!op->data) return -EINVAL; if (op->charcount > max_font_glyphs) return -EINVAL; if (op->width <= 0 || op->width > max_font_width || !op->height || op->height > max_font_height) return -EINVAL; if (vpitch < op->height) return -EINVAL; size = (op->width+7)/8 * vpitch * op->charcount; if (size > max_font_size) return -ENOSPC; font.data = memdup_user(op->data, size); if (IS_ERR(font.data)) return PTR_ERR(font.data); font.charcount = op->charcount; font.width = op->width; font.height = op->height; console_lock(); if (vc->vc_mode != KD_TEXT) rc = -EINVAL; else if (vc->vc_sw->con_font_set) { if (vc_is_sel(vc)) clear_selection(); rc = vc->vc_sw->con_font_set(vc, &font, vpitch, op->flags); } else rc = -ENOSYS; console_unlock(); kfree(font.data); return rc; } static int con_font_default(struct vc_data *vc, struct console_font_op *op) { struct console_font font = {.width = op->width, .height = op->height}; char name[MAX_FONT_NAME]; char *s = name; int rc; if (!op->data) s = NULL; else if (strncpy_from_user(name, op->data, MAX_FONT_NAME - 1) < 0) return -EFAULT; else name[MAX_FONT_NAME - 1] = 0; console_lock(); if (vc->vc_mode != KD_TEXT) { console_unlock(); return -EINVAL; } if (vc->vc_sw->con_font_default) { if (vc_is_sel(vc)) clear_selection(); rc = vc->vc_sw->con_font_default(vc, &font, s); } else rc = -ENOSYS; console_unlock(); if (!rc) { op->width = font.width; op->height = font.height; } return rc; } int con_font_op(struct vc_data *vc, struct console_font_op *op) { switch (op->op) { case KD_FONT_OP_SET: case KD_FONT_OP_SET_TALL: return con_font_set(vc, op); case KD_FONT_OP_GET: case KD_FONT_OP_GET_TALL: return con_font_get(vc, op); case KD_FONT_OP_SET_DEFAULT: return con_font_default(vc, op); case KD_FONT_OP_COPY: /* was buggy and never really used */ return -EINVAL; } return -ENOSYS; } /* * Interface exported to selection and vcs. */ /* used by selection */ u16 screen_glyph(const struct vc_data *vc, int offset) { u16 w = scr_readw(screenpos(vc, offset, true)); u16 c = w & 0xff; if (w & vc->vc_hi_font_mask) c |= 0x100; return c; } EXPORT_SYMBOL_GPL(screen_glyph); u32 screen_glyph_unicode(const struct vc_data *vc, int n) { u32 **uni_lines = vc->vc_uni_lines; if (uni_lines) return uni_lines[n / vc->vc_cols][n % vc->vc_cols]; return inverse_translate(vc, screen_glyph(vc, n * 2), true); } EXPORT_SYMBOL_GPL(screen_glyph_unicode); /* used by vcs - note the word offset */ unsigned short *screen_pos(const struct vc_data *vc, int w_offset, bool viewed) { return screenpos(vc, 2 * w_offset, viewed); } EXPORT_SYMBOL_GPL(screen_pos); void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]) { /* clamp values if they don't fit */ xy[0] = min(vc->state.x, 0xFFu); xy[1] = min(vc->state.y, 0xFFu); } void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]) { hide_cursor(vc); gotoxy(vc, xy[0], xy[1]); set_cursor(vc); } u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org) { if ((unsigned long)org == vc->vc_pos && softcursor_original != -1) return softcursor_original; return scr_readw(org); } void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org) { scr_writew(val, org); if ((unsigned long)org == vc->vc_pos) { softcursor_original = -1; add_softcursor(vc); } } void vcs_scr_updated(struct vc_data *vc) { notify_update(vc); } |
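/*
 * Illustrative userspace sketch (not part of the driver above): the
 * tioclinux() handler is reached through the TIOCLINUX ioctl, with the
 * TIOCL_* subcode placed in the first byte of the argument. The snippet
 * below queries the foreground console via TIOCL_GETFGCONSOLE; it assumes
 * <linux/tiocl.h> and /dev/tty0 are available, that the caller has the
 * required privileges, and it keeps error handling minimal. Note the
 * 0-based return value, matching the fg_console variable used above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tiocl.h>

int main(void)
{
	char subcode = TIOCL_GETFGCONSOLE;	/* subfunction goes in byte 0 */
	int fd = open("/dev/tty0", O_RDWR);
	int fg;

	if (fd < 0)
		return 1;

	fg = ioctl(fd, TIOCLINUX, &subcode);	/* returns fg_console (0-based) */
	if (fg >= 0)
		printf("active console: tty%d\n", fg + 1);

	close(fd);
	return 0;
}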
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NETFILTER_NETDEV_H_ #define _NETFILTER_NETDEV_H_ #include <linux/netfilter.h> #include <linux/netdevice.h> #ifdef CONFIG_NETFILTER_INGRESS static inline bool nf_hook_ingress_active(const struct sk_buff *skb) { #ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS])) return false; #endif return rcu_access_pointer(skb->dev->nf_hooks_ingress); } /* caller must hold rcu_read_lock */ static inline int nf_hook_ingress(struct sk_buff *skb) { struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress); struct nf_hook_state state; int ret; /* Must recheck the ingress hook head, in the event it became NULL * after the check in nf_hook_ingress_active evaluated to true. */ if (unlikely(!e)) return 0; nf_hook_state_init(&state, NF_NETDEV_INGRESS, NFPROTO_NETDEV, skb->dev, NULL, NULL, dev_net(skb->dev), NULL); ret = nf_hook_slow(skb, &state, e, 0); if (ret == 0) return -1; return ret; } #else /* CONFIG_NETFILTER_INGRESS */ static inline int nf_hook_ingress_active(struct sk_buff *skb) { return 0; } static inline int nf_hook_ingress(struct sk_buff *skb) { return 0; } #endif /* CONFIG_NETFILTER_INGRESS */ #ifdef CONFIG_NETFILTER_EGRESS static inline bool nf_hook_egress_active(void) { #ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS])) return false; #endif return true; } /** * nf_hook_egress - classify packets before transmission * @skb: packet to be classified * @rc: result code which shall be returned by __dev_queue_xmit() on failure * @dev: netdev whose egress hooks shall be applied to @skb * * Caller must hold rcu_read_lock. * * On ingress, packets are classified first by tc, then by netfilter. * On egress, the order is reversed for symmetry. Conceptually, tc and * netfilter can be thought of as layers, with netfilter layered above tc: * When tc redirects a packet to another interface, netfilter is not applied * because the packet is on the tc layer. * * The nf_skip_egress flag controls whether netfilter is applied on egress. * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the * packet passes through tc and netfilter. Because __dev_queue_xmit() may be * called recursively by tunnel drivers such as vxlan, the flag is reverted to * false after sch_handle_egress(). This ensures that netfilter is applied * both on the overlay and underlying network. * * Returns: @skb on success or %NULL if the packet was consumed or filtered. 
*/ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, struct net_device *dev) { struct nf_hook_entries *e; struct nf_hook_state state; int ret; #ifdef CONFIG_NETFILTER_SKIP_EGRESS if (skb->nf_skip_egress) return skb; #endif e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held()); if (!e) return skb; nf_hook_state_init(&state, NF_NETDEV_EGRESS, NFPROTO_NETDEV, NULL, dev, NULL, dev_net(dev), NULL); /* nf assumes rcu_read_lock, not just read_lock_bh */ rcu_read_lock(); ret = nf_hook_slow(skb, &state, e, 0); rcu_read_unlock(); if (ret == 1) { return skb; } else if (ret < 0) { *rc = NET_XMIT_DROP; return NULL; } else { /* ret == 0 */ *rc = NET_XMIT_SUCCESS; return NULL; } } #else /* CONFIG_NETFILTER_EGRESS */ static inline bool nf_hook_egress_active(void) { return false; } static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc, struct net_device *dev) { return skb; } #endif /* CONFIG_NETFILTER_EGRESS */ static inline void nf_skip_egress(struct sk_buff *skb, bool skip) { #ifdef CONFIG_NETFILTER_SKIP_EGRESS skb->nf_skip_egress = skip; #endif } static inline void nf_hook_netdev_init(struct net_device *dev) { #ifdef CONFIG_NETFILTER_INGRESS RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL); #endif #ifdef CONFIG_NETFILTER_EGRESS RCU_INIT_POINTER(dev->nf_hooks_egress, NULL); #endif } #endif /* _NETFILTER_NETDEV_H_ */ |
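/*
 * Simplified, hypothetical sketch (not the real call site): the actual
 * user of nf_hook_egress() is __dev_queue_xmit(), which is considerably
 * more involved. This only illustrates the calling convention documented
 * above: run the hook under rcu_read_lock(), treat a NULL return as
 * "consumed or filtered" and report *rc, otherwise keep transmitting.
 * xmit_one_skb() is an invented placeholder for the low-level transmit.
 */
static int xmit_with_egress_hook(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NET_XMIT_SUCCESS;

	rcu_read_lock();
	if (nf_hook_egress_active())
		skb = nf_hook_egress(skb, &rc, dev);
	rcu_read_unlock();

	if (!skb)			/* consumed or filtered by a hook */
		return rc;

	return xmit_one_skb(skb, dev);	/* hypothetical low-level transmit */
}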
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 | /* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ #ifndef MEM_H_MODULE #define MEM_H_MODULE /*-**************************************** * Dependencies ******************************************/ #include <linux/unaligned.h> /* get_unaligned, put_unaligned* */ #include <linux/compiler.h> /* inline */ #include <linux/swab.h> /* swab32, swab64 */ #include <linux/types.h> /* size_t, ptrdiff_t */ #include "debug.h" /* DEBUG_STATIC_ASSERT */ /*-**************************************** * Compiler specifics ******************************************/ #undef MEM_STATIC /* may be already defined from common/compiler.h */ #define MEM_STATIC static inline /*-************************************************************** * Basic Types *****************************************************************/ typedef uint8_t BYTE; typedef uint8_t U8; typedef int8_t S8; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; typedef int32_t S32; typedef uint64_t U64; typedef int64_t S64; /*-************************************************************** * Memory I/O API *****************************************************************/ /*=== Static platform detection ===*/ MEM_STATIC unsigned MEM_32bits(void); MEM_STATIC unsigned MEM_64bits(void); MEM_STATIC unsigned MEM_isLittleEndian(void); /*=== Native unaligned read/write ===*/ MEM_STATIC U16 MEM_read16(const void* memPtr); MEM_STATIC U32 MEM_read32(const void* memPtr); MEM_STATIC U64 MEM_read64(const void* memPtr); MEM_STATIC size_t MEM_readST(const void* memPtr); MEM_STATIC void MEM_write16(void* memPtr, U16 value); MEM_STATIC void MEM_write32(void* memPtr, U32 value); MEM_STATIC void MEM_write64(void* memPtr, U64 value); /*=== Little endian unaligned read/write ===*/ MEM_STATIC U16 MEM_readLE16(const void* memPtr); MEM_STATIC U32 MEM_readLE24(const void* memPtr); MEM_STATIC U32 MEM_readLE32(const void* memPtr); MEM_STATIC U64 MEM_readLE64(const void* memPtr); MEM_STATIC size_t MEM_readLEST(const void* memPtr); MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val); MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val); MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32); MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64); MEM_STATIC void 
MEM_writeLEST(void* memPtr, size_t val); /*=== Big endian unaligned read/write ===*/ MEM_STATIC U32 MEM_readBE32(const void* memPtr); MEM_STATIC U64 MEM_readBE64(const void* memPtr); MEM_STATIC size_t MEM_readBEST(const void* memPtr); MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32); MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64); MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val); /*=== Byteswap ===*/ MEM_STATIC U32 MEM_swap32(U32 in); MEM_STATIC U64 MEM_swap64(U64 in); MEM_STATIC size_t MEM_swapST(size_t in); /*-************************************************************** * Memory I/O Implementation *****************************************************************/ MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t) == 4; } MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t) == 8; } #if defined(__LITTLE_ENDIAN) #define MEM_LITTLE_ENDIAN 1 #else #define MEM_LITTLE_ENDIAN 0 #endif MEM_STATIC unsigned MEM_isLittleEndian(void) { return MEM_LITTLE_ENDIAN; } MEM_STATIC U16 MEM_read16(const void *memPtr) { return get_unaligned((const U16 *)memPtr); } MEM_STATIC U32 MEM_read32(const void *memPtr) { return get_unaligned((const U32 *)memPtr); } MEM_STATIC U64 MEM_read64(const void *memPtr) { return get_unaligned((const U64 *)memPtr); } MEM_STATIC size_t MEM_readST(const void *memPtr) { return get_unaligned((const size_t *)memPtr); } MEM_STATIC void MEM_write16(void *memPtr, U16 value) { put_unaligned(value, (U16 *)memPtr); } MEM_STATIC void MEM_write32(void *memPtr, U32 value) { put_unaligned(value, (U32 *)memPtr); } MEM_STATIC void MEM_write64(void *memPtr, U64 value) { put_unaligned(value, (U64 *)memPtr); } /*=== Little endian r/w ===*/ MEM_STATIC U16 MEM_readLE16(const void *memPtr) { return get_unaligned_le16(memPtr); } MEM_STATIC void MEM_writeLE16(void *memPtr, U16 val) { put_unaligned_le16(val, memPtr); } MEM_STATIC U32 MEM_readLE24(const void *memPtr) { return MEM_readLE16(memPtr) + (((const BYTE *)memPtr)[2] << 16); } MEM_STATIC void MEM_writeLE24(void *memPtr, U32 val) { MEM_writeLE16(memPtr, (U16)val); ((BYTE *)memPtr)[2] = (BYTE)(val >> 16); } MEM_STATIC U32 MEM_readLE32(const void *memPtr) { return get_unaligned_le32(memPtr); } MEM_STATIC void MEM_writeLE32(void *memPtr, U32 val32) { put_unaligned_le32(val32, memPtr); } MEM_STATIC U64 MEM_readLE64(const void *memPtr) { return get_unaligned_le64(memPtr); } MEM_STATIC void MEM_writeLE64(void *memPtr, U64 val64) { put_unaligned_le64(val64, memPtr); } MEM_STATIC size_t MEM_readLEST(const void *memPtr) { if (MEM_32bits()) return (size_t)MEM_readLE32(memPtr); else return (size_t)MEM_readLE64(memPtr); } MEM_STATIC void MEM_writeLEST(void *memPtr, size_t val) { if (MEM_32bits()) MEM_writeLE32(memPtr, (U32)val); else MEM_writeLE64(memPtr, (U64)val); } /*=== Big endian r/w ===*/ MEM_STATIC U32 MEM_readBE32(const void *memPtr) { return get_unaligned_be32(memPtr); } MEM_STATIC void MEM_writeBE32(void *memPtr, U32 val32) { put_unaligned_be32(val32, memPtr); } MEM_STATIC U64 MEM_readBE64(const void *memPtr) { return get_unaligned_be64(memPtr); } MEM_STATIC void MEM_writeBE64(void *memPtr, U64 val64) { put_unaligned_be64(val64, memPtr); } MEM_STATIC size_t MEM_readBEST(const void *memPtr) { if (MEM_32bits()) return (size_t)MEM_readBE32(memPtr); else return (size_t)MEM_readBE64(memPtr); } MEM_STATIC void MEM_writeBEST(void *memPtr, size_t val) { if (MEM_32bits()) MEM_writeBE32(memPtr, (U32)val); else MEM_writeBE64(memPtr, (U64)val); } MEM_STATIC U32 MEM_swap32(U32 in) { return swab32(in); } MEM_STATIC U64 
MEM_swap64(U64 in)
{
	return swab64(in);
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
	if (MEM_32bits())
		return (size_t)MEM_swap32((U32)in);
	else
		return (size_t)MEM_swap64((U64)in);
}

#endif /* MEM_H_MODULE */
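/*
 * Illustrative sketch (not part of mem.h): a round trip through the
 * unaligned little-endian 24-bit helpers defined above. MEM_writeLE24()
 * stores the low 16 bits via MEM_writeLE16() and the remaining byte in
 * memPtr[2], so the buffer below ends up as { 0xEF, 0xCD, 0xAB } and
 * MEM_readLE24() reassembles the original value.
 */
MEM_STATIC U32 MEM_le24_roundtrip_example(void)
{
	BYTE buf[3];

	MEM_writeLE24(buf, 0x00ABCDEFu);
	return MEM_readLE24(buf);	/* yields 0x00ABCDEF again */
}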
| 1441 40 58 3 1387 2 8 4 5 49 12 42 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 | /* SPDX-License-Identifier: GPL-2.0 */ /* * Events for filesystem locks * * Copyright 2013 Jeff Layton <jlayton@poochiereds.net> */ #undef TRACE_SYSTEM #define TRACE_SYSTEM filelock #if !defined(_TRACE_FILELOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_FILELOCK_H #include <linux/tracepoint.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/kdev_t.h> #define show_fl_flags(val) \ __print_flags(val, "|", \ { FL_POSIX, "FL_POSIX" }, \ { FL_FLOCK, "FL_FLOCK" }, \ { FL_DELEG, "FL_DELEG" }, \ { FL_ACCESS, "FL_ACCESS" }, \ { FL_EXISTS, "FL_EXISTS" }, \ { FL_LEASE, "FL_LEASE" }, \ { FL_CLOSE, "FL_CLOSE" }, \ { FL_SLEEP, "FL_SLEEP" }, \ { FL_DOWNGRADE_PENDING, "FL_DOWNGRADE_PENDING" }, \ { FL_UNLOCK_PENDING, "FL_UNLOCK_PENDING" }, \ { FL_OFDLCK, "FL_OFDLCK" }) #define show_fl_type(val) \ __print_symbolic(val, \ { F_RDLCK, "F_RDLCK" }, \ { F_WRLCK, "F_WRLCK" }, \ { F_UNLCK, "F_UNLCK" }) TRACE_EVENT(locks_get_lock_context, TP_PROTO(struct inode *inode, int type, struct file_lock_context *ctx), TP_ARGS(inode, type, ctx), TP_STRUCT__entry( __field(unsigned long, i_ino) __field(dev_t, s_dev) __field(unsigned char, type) __field(struct file_lock_context *, ctx) ), TP_fast_assign( __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; __entry->type = type; __entry->ctx = ctx; ), TP_printk("dev=0x%x:0x%x ino=0x%lx type=%s ctx=%p", MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino, show_fl_type(__entry->type), __entry->ctx) ); DECLARE_EVENT_CLASS(filelock_lock, TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), TP_ARGS(inode, fl, ret), TP_STRUCT__entry( __field(struct file_lock *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) __field(struct file_lock_core *, blocker) __field(fl_owner_t, owner) __field(unsigned int, pid) __field(unsigned int, flags) __field(unsigned char, type) __field(loff_t, fl_start) __field(loff_t, fl_end) __field(int, ret) ), TP_fast_assign( __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; __entry->blocker = fl ? fl->c.flc_blocker : NULL; __entry->owner = fl ? fl->c.flc_owner : NULL; __entry->pid = fl ? fl->c.flc_pid : 0; __entry->flags = fl ? fl->c.flc_flags : 0; __entry->type = fl ? fl->c.flc_type : 0; __entry->fl_start = fl ? fl->fl_start : 0; __entry->fl_end = fl ? 
fl->fl_end : 0; __entry->ret = ret; ), TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d", __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino, __entry->blocker, __entry->owner, __entry->pid, show_fl_flags(__entry->flags), show_fl_type(__entry->type), __entry->fl_start, __entry->fl_end, __entry->ret) ); DEFINE_EVENT(filelock_lock, posix_lock_inode, TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), TP_ARGS(inode, fl, ret)); DEFINE_EVENT(filelock_lock, fcntl_setlk, TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), TP_ARGS(inode, fl, ret)); DEFINE_EVENT(filelock_lock, locks_remove_posix, TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), TP_ARGS(inode, fl, ret)); DEFINE_EVENT(filelock_lock, flock_lock_inode, TP_PROTO(struct inode *inode, struct file_lock *fl, int ret), TP_ARGS(inode, fl, ret)); DECLARE_EVENT_CLASS(filelock_lease, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl), TP_STRUCT__entry( __field(struct file_lease *, fl) __field(unsigned long, i_ino) __field(dev_t, s_dev) __field(struct file_lock_core *, blocker) __field(fl_owner_t, owner) __field(unsigned int, flags) __field(unsigned char, type) __field(unsigned long, break_time) __field(unsigned long, downgrade_time) ), TP_fast_assign( __entry->fl = fl ? fl : NULL; __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; __entry->blocker = fl ? fl->c.flc_blocker : NULL; __entry->owner = fl ? fl->c.flc_owner : NULL; __entry->flags = fl ? fl->c.flc_flags : 0; __entry->type = fl ? fl->c.flc_type : 0; __entry->break_time = fl ? fl->fl_break_time : 0; __entry->downgrade_time = fl ? fl->fl_downgrade_time : 0; ), TP_printk("fl=%p dev=0x%x:0x%x ino=0x%lx fl_blocker=%p fl_owner=%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu", __entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino, __entry->blocker, __entry->owner, show_fl_flags(__entry->flags), show_fl_type(__entry->type), __entry->break_time, __entry->downgrade_time) ); DEFINE_EVENT(filelock_lease, break_lease_noblock, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); DEFINE_EVENT(filelock_lease, break_lease_block, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); DEFINE_EVENT(filelock_lease, break_lease_unblock, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); DEFINE_EVENT(filelock_lease, generic_delete_lease, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); DEFINE_EVENT(filelock_lease, time_out_leases, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl)); TRACE_EVENT(generic_add_lease, TP_PROTO(struct inode *inode, struct file_lease *fl), TP_ARGS(inode, fl), TP_STRUCT__entry( __field(unsigned long, i_ino) __field(int, wcount) __field(int, rcount) __field(int, icount) __field(dev_t, s_dev) __field(fl_owner_t, owner) __field(unsigned int, flags) __field(unsigned char, type) ), TP_fast_assign( __entry->s_dev = inode->i_sb->s_dev; __entry->i_ino = inode->i_ino; __entry->wcount = atomic_read(&inode->i_writecount); __entry->rcount = atomic_read(&inode->i_readcount); __entry->icount = atomic_read(&inode->i_count); __entry->owner = fl->c.flc_owner; __entry->flags = fl->c.flc_flags; __entry->type = fl->c.flc_type; ), TP_printk("dev=0x%x:0x%x ino=0x%lx wcount=%d rcount=%d icount=%d fl_owner=%p fl_flags=%s fl_type=%s", MAJOR(__entry->s_dev), MINOR(__entry->s_dev), 
__entry->i_ino, __entry->wcount, __entry->rcount, __entry->icount, __entry->owner, show_fl_flags(__entry->flags), show_fl_type(__entry->type)) ); TRACE_EVENT(leases_conflict, TP_PROTO(bool conflict, struct file_lease *lease, struct file_lease *breaker), TP_ARGS(conflict, lease, breaker), TP_STRUCT__entry( __field(void *, lease) __field(void *, breaker) __field(unsigned int, l_fl_flags) __field(unsigned int, b_fl_flags) __field(unsigned char, l_fl_type) __field(unsigned char, b_fl_type) __field(bool, conflict) ), TP_fast_assign( __entry->lease = lease; __entry->l_fl_flags = lease->c.flc_flags; __entry->l_fl_type = lease->c.flc_type; __entry->breaker = breaker; __entry->b_fl_flags = breaker->c.flc_flags; __entry->b_fl_type = breaker->c.flc_type; __entry->conflict = conflict; ), TP_printk("conflict %d: lease=%p fl_flags=%s fl_type=%s; breaker=%p fl_flags=%s fl_type=%s", __entry->conflict, __entry->lease, show_fl_flags(__entry->l_fl_flags), show_fl_type(__entry->l_fl_type), __entry->breaker, show_fl_flags(__entry->b_fl_flags), show_fl_type(__entry->b_fl_type)) ); #endif /* _TRACE_FILELOCK_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
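/*
 * Illustrative usage (not part of the original header): a minimal sketch of
 * how locking code emits one of the events defined above. Exactly one .c
 * file defines CREATE_TRACE_POINTS before including this header so that the
 * trace_*() helpers are instantiated; the wrapper function below is
 * hypothetical and stands in for the real lock-insertion work.
 */
#include <linux/fs.h>
#include <linux/filelock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>

static int example_apply_posix_lock(struct inode *inode, struct file_lock *fl)
{
	int error = 0;	/* result of the real lock insertion would go here */

	/* One call per attempt; the event records the lock and the result. */
	trace_posix_lock_inode(inode, fl, error);
	return error;
}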
| 16 12 15 19 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 | /* * linux/fs/nls/nls_cp866.c * * Charset cp866 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0410, 0x0411, 0x0412, 0x0413, 0x0414, 0x0415, 0x0416, 0x0417, 0x0418, 0x0419, 0x041a, 0x041b, 0x041c, 0x041d, 0x041e, 0x041f, /* 0x90*/ 0x0420, 0x0421, 0x0422, 0x0423, 0x0424, 0x0425, 0x0426, 0x0427, 0x0428, 0x0429, 0x042a, 0x042b, 0x042c, 0x042d, 0x042e, 0x042f, /* 0xa0*/ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0439, 0x043a, 0x043b, 0x043c, 0x043d, 0x043e, 0x043f, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556, 0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567, /* 0xd0*/ 0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b, 0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 
0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, /* 0xf0*/ 0x0401, 0x0451, 0x0404, 0x0454, 0x0407, 0x0457, 0x040e, 0x045e, 0x00b0, 0x2219, 0x00b7, 0x221a, 0x2116, 0x00a4, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, /* 0xb0-0xb7 */ }; static const unsigned char page04[256] = { 0x00, 0xf0, 0x00, 0x00, 0xf2, 0x00, 0x00, 0xf4, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x00, /* 0x08-0x0f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x30-0x37 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x38-0x3f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */ 0x00, 0xf1, 0x00, 0x00, 0xf3, 0x00, 0x00, 0xf5, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x00, /* 0x58-0x5f */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, /* 0x10-0x17 */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 
0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */ 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */ 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */ 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, NULL, NULL, page04, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page21, page22, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0x80-0x87 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0x88-0x8f */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x90-0x97 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf1, 0xf1, 0xf3, 0xf3, 0xf5, 0xf5, 0xf7, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 
0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xa0-0xa7 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0xe8-0xef */ 0xf0, 0xf0, 0xf2, 0xf2, 0xf4, 0xf4, 0xf6, 0xf6, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp866", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp866(void) { return register_nls(&table); } static void __exit exit_nls_cp866(void) { unregister_nls(&table); } module_init(init_nls_cp866) module_exit(exit_nls_cp866) MODULE_DESCRIPTION("NLS Codepage 866 (Cyrillic/Russian)"); MODULE_LICENSE("Dual BSD/GPL"); |
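/*
 * Illustrative usage (not part of the original file): a minimal sketch of a
 * cp866 <-> Unicode round trip through the generic NLS interface, the way a
 * filesystem would use the table registered above. The function name is
 * hypothetical and error handling is abbreviated.
 */
#include <linux/errno.h>
#include <linux/nls.h>

static int example_cp866_roundtrip(unsigned char in, unsigned char *out)
{
	struct nls_table *nls = load_nls("cp866");	/* pins this module */
	wchar_t uni;
	int ret;

	if (!nls)
		return -EINVAL;

	ret = nls->char2uni(&in, 1, &uni);		/* one byte -> Unicode */
	if (ret > 0)
		ret = nls->uni2char(uni, out, 1);	/* Unicode -> one byte */

	unload_nls(nls);
	return ret < 0 ? ret : 0;
}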
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *               2002 Adam J. Richter <adam@yggdrasil.com>
 *               2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

enum {
	SKCIPHER_WALK_SLOW = 1 << 0,
	SKCIPHER_WALK_COPY = 1 << 1,
	SKCIPHER_WALK_DIFF = 1 << 2,
	SKCIPHER_WALK_SLEEP = 1 << 3,
};

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ?
GFP_KERNEL : GFP_ATOMIC; } void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes) { struct scatterlist *sg = walk->sg; nbytes += walk->offset - sg->offset; while (nbytes > sg->length) { nbytes -= sg->length; sg = sg_next(sg); } walk->sg = sg; walk->offset = sg->offset + nbytes; } EXPORT_SYMBOL_GPL(scatterwalk_skip); inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, unsigned int nbytes) { do { unsigned int to_copy; to_copy = scatterwalk_next(walk, nbytes); memcpy(buf, walk->addr, to_copy); scatterwalk_done_src(walk, to_copy); buf += to_copy; nbytes -= to_copy; } while (nbytes); } EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk); inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, unsigned int nbytes) { do { unsigned int to_copy; to_copy = scatterwalk_next(walk, nbytes); memcpy(walk->addr, buf, to_copy); scatterwalk_done_dst(walk, to_copy); buf += to_copy; nbytes -= to_copy; } while (nbytes); } EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk); void memcpy_from_sglist(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes) { struct scatter_walk walk; if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; scatterwalk_start_at_pos(&walk, sg, start); memcpy_from_scatterwalk(buf, &walk, nbytes); } EXPORT_SYMBOL_GPL(memcpy_from_sglist); void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, const void *buf, unsigned int nbytes) { struct scatter_walk walk; if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; scatterwalk_start_at_pos(&walk, sg, start); memcpy_to_scatterwalk(&walk, buf, nbytes); } EXPORT_SYMBOL_GPL(memcpy_to_sglist); void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct skcipher_walk walk = {}; if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; walk.total = nbytes; scatterwalk_start(&walk.in, src); scatterwalk_start(&walk.out, dst); skcipher_walk_first(&walk, true); do { if (walk.src.virt.addr != walk.dst.virt.addr) memcpy(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); skcipher_walk_done(&walk, 0); } while (walk.nbytes); } EXPORT_SYMBOL_GPL(memcpy_sglist); struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, unsigned int len) { for (;;) { if (!len) return src; if (src->length > len) break; len -= src->length; src = sg_next(src); } sg_init_table(dst, 2); sg_set_page(dst, sg_page(src), src->length - len, src->offset + len); scatterwalk_crypto_chain(dst, sg_next(src), 2); return dst; } EXPORT_SYMBOL_GPL(scatterwalk_ffwd); static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { unsigned alignmask = walk->alignmask; unsigned n; void *buffer; if (!walk->buffer) walk->buffer = walk->page; buffer = walk->buffer; if (!buffer) { /* Min size for a buffer of bsize bytes aligned to alignmask */ n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); buffer = kzalloc(n, skcipher_walk_gfp(walk)); if (!buffer) return skcipher_walk_done(walk, -ENOMEM); walk->buffer = buffer; } buffer = PTR_ALIGN(buffer, alignmask + 1); memcpy_from_scatterwalk(buffer, &walk->in, bsize); walk->out.__addr = buffer; walk->in.__addr = walk->out.addr; walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; return 0; } static int skcipher_next_copy(struct skcipher_walk *walk) { void *tmp = walk->page; scatterwalk_map(&walk->in); memcpy(tmp, walk->in.addr, walk->nbytes); scatterwalk_unmap(&walk->in); /* * walk->in is advanced later when the number of bytes actually * processed (which might be less than 
walk->nbytes) is known. */ walk->in.__addr = tmp; walk->out.__addr = tmp; return 0; } static int skcipher_next_fast(struct skcipher_walk *walk) { unsigned long diff; diff = offset_in_page(walk->in.offset) - offset_in_page(walk->out.offset); diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); scatterwalk_map(&walk->out); walk->in.__addr = walk->out.__addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; scatterwalk_map(&walk->in); } return 0; } static int skcipher_walk_next(struct skcipher_walk *walk) { unsigned int bsize; unsigned int n; n = walk->total; bsize = min(walk->stride, max(n, walk->blocksize)); n = scatterwalk_clamp(&walk->in, n); n = scatterwalk_clamp(&walk->out, n); if (unlikely(n < bsize)) { if (unlikely(walk->total < walk->blocksize)) return skcipher_walk_done(walk, -EINVAL); slow_path: return skcipher_next_slow(walk, bsize); } walk->nbytes = n; if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) { if (!walk->page) { gfp_t gfp = skcipher_walk_gfp(walk); walk->page = (void *)__get_free_page(gfp); if (!walk->page) goto slow_path; } walk->flags |= SKCIPHER_WALK_COPY; return skcipher_next_copy(walk); } return skcipher_next_fast(walk); } static int skcipher_copy_iv(struct skcipher_walk *walk) { unsigned alignmask = walk->alignmask; unsigned ivsize = walk->ivsize; unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1); unsigned size; u8 *iv; /* Min size for a buffer of stride + ivsize, aligned to alignmask */ size = aligned_stride + ivsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); walk->buffer = kmalloc(size, skcipher_walk_gfp(walk)); if (!walk->buffer) return -ENOMEM; iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride; walk->iv = memcpy(iv, walk->iv, walk->ivsize); return 0; } int skcipher_walk_first(struct skcipher_walk *walk, bool atomic) { if (WARN_ON_ONCE(in_hardirq())) return -EDEADLK; walk->flags = atomic ? 0 : SKCIPHER_WALK_SLEEP; walk->buffer = NULL; if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { int err = skcipher_copy_iv(walk); if (err) return err; } walk->page = NULL; return skcipher_walk_next(walk); } EXPORT_SYMBOL_GPL(skcipher_walk_first); /** * skcipher_walk_done() - finish one step of a skcipher_walk * @walk: the skcipher_walk * @res: number of bytes *not* processed (>= 0) from walk->nbytes, * or a -errno value to terminate the walk due to an error * * This function cleans up after one step of walking through the source and * destination scatterlists, and advances to the next step if applicable. * walk->nbytes is set to the number of bytes available in the next step, * walk->total is set to the new total number of bytes remaining, and * walk->{src,dst}.virt.addr is set to the next pair of data pointers. If there * is no more data, or if an error occurred (i.e. -errno return), then * walk->nbytes and walk->total are set to 0 and all resources owned by the * skcipher_walk are freed. * * Return: 0 or a -errno value. If @res was a -errno value then it will be * returned, but other errors may occur too. 
*/ int skcipher_walk_done(struct skcipher_walk *walk, int res) { unsigned int n = walk->nbytes; /* num bytes processed this step */ unsigned int total = 0; /* new total remaining */ if (!n) goto finish; if (likely(res >= 0)) { n -= res; /* subtract num bytes *not* processed */ total = walk->total - n; } if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { scatterwalk_advance(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_DIFF) { scatterwalk_done_src(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_COPY) { scatterwalk_advance(&walk->in, n); scatterwalk_map(&walk->out); memcpy(walk->out.addr, walk->page, n); } else { /* SKCIPHER_WALK_SLOW */ if (res > 0) { /* * Didn't process all bytes. Either the algorithm is * broken, or this was the last step and it turned out * the message wasn't evenly divisible into blocks but * the algorithm requires it. */ res = -EINVAL; total = 0; } else memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); goto dst_done; } scatterwalk_done_dst(&walk->out, n); dst_done: if (res > 0) res = 0; walk->total = total; walk->nbytes = 0; if (total) { if (walk->flags & SKCIPHER_WALK_SLEEP) cond_resched(); walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF); return skcipher_walk_next(walk); } finish: /* Short-circuit for the common/fast path. */ if (!((unsigned long)walk->buffer | (unsigned long)walk->page)) goto out; if (walk->iv != walk->oiv) memcpy(walk->oiv, walk->iv, walk->ivsize); if (walk->buffer != walk->page) kfree(walk->buffer); if (walk->page) free_page((unsigned long)walk->page); out: return res; } EXPORT_SYMBOL_GPL(skcipher_walk_done); |
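/*
 * Illustrative usage (not part of the original file): the conventional
 * driver-side loop that consumes a walk, matching the contract described in
 * the skcipher_walk_done() comment above. A real mode implementation would
 * obtain the walk with skcipher_walk_virt(); the XOR "cipher" below is only
 * a placeholder and the function name is hypothetical.
 */
#include <crypto/internal/skcipher.h>

static int example_xor_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	while (walk.nbytes) {
		u8 *dst = walk.dst.virt.addr;
		const u8 *src = walk.src.virt.addr;
		unsigned int i;

		/* Placeholder transform: process every byte offered. */
		for (i = 0; i < walk.nbytes; i++)
			dst[i] = src[i] ^ 0x5a;

		/* 0 bytes were left unprocessed in this step. */
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}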
| 2 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM snd_pcm #define TRACE_INCLUDE_FILE pcm_trace #if !defined(_PCM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define _PCM_TRACE_H #include <linux/tracepoint.h> TRACE_EVENT(hwptr, TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_uframes_t pos, bool irq), TP_ARGS(substream, pos, irq), TP_STRUCT__entry( __field( bool, in_interrupt ) __field( unsigned int, card ) __field( unsigned int, device ) __field( unsigned int, number ) __field( unsigned int, stream ) __field( snd_pcm_uframes_t, pos ) __field( snd_pcm_uframes_t, period_size ) __field( snd_pcm_uframes_t, buffer_size ) __field( snd_pcm_uframes_t, old_hw_ptr ) __field( snd_pcm_uframes_t, hw_ptr_base ) ), TP_fast_assign( __entry->in_interrupt = (irq); __entry->card = (substream)->pcm->card->number; __entry->device = (substream)->pcm->device; __entry->number = (substream)->number; __entry->stream = (substream)->stream; __entry->pos = (pos); __entry->period_size = (substream)->runtime->period_size; __entry->buffer_size = (substream)->runtime->buffer_size; __entry->old_hw_ptr = (substream)->runtime->status->hw_ptr; __entry->hw_ptr_base = (substream)->runtime->hw_ptr_base; ), TP_printk("pcmC%dD%d%s/sub%d: %s: pos=%lu, old=%lu, base=%lu, period=%lu, buf=%lu", __entry->card, __entry->device, __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? "p" : "c", __entry->number, __entry->in_interrupt ? "IRQ" : "POS", (unsigned long)__entry->pos, (unsigned long)__entry->old_hw_ptr, (unsigned long)__entry->hw_ptr_base, (unsigned long)__entry->period_size, (unsigned long)__entry->buffer_size) ); TRACE_EVENT(xrun, TP_PROTO(struct snd_pcm_substream *substream), TP_ARGS(substream), TP_STRUCT__entry( __field( unsigned int, card ) __field( unsigned int, device ) __field( unsigned int, number ) __field( unsigned int, stream ) __field( snd_pcm_uframes_t, period_size ) __field( snd_pcm_uframes_t, buffer_size ) __field( snd_pcm_uframes_t, old_hw_ptr ) __field( snd_pcm_uframes_t, hw_ptr_base ) ), TP_fast_assign( __entry->card = (substream)->pcm->card->number; __entry->device = (substream)->pcm->device; __entry->number = (substream)->number; __entry->stream = (substream)->stream; __entry->period_size = (substream)->runtime->period_size; __entry->buffer_size = (substream)->runtime->buffer_size; __entry->old_hw_ptr = (substream)->runtime->status->hw_ptr; __entry->hw_ptr_base = (substream)->runtime->hw_ptr_base; ), TP_printk("pcmC%dD%d%s/sub%d: XRUN: old=%lu, base=%lu, period=%lu, buf=%lu", __entry->card, __entry->device, __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 
"p" : "c", __entry->number, (unsigned long)__entry->old_hw_ptr, (unsigned long)__entry->hw_ptr_base, (unsigned long)__entry->period_size, (unsigned long)__entry->buffer_size) ); TRACE_EVENT(hw_ptr_error, TP_PROTO(struct snd_pcm_substream *substream, const char *why), TP_ARGS(substream, why), TP_STRUCT__entry( __field( unsigned int, card ) __field( unsigned int, device ) __field( unsigned int, number ) __field( unsigned int, stream ) __string( reason, why ) ), TP_fast_assign( __entry->card = (substream)->pcm->card->number; __entry->device = (substream)->pcm->device; __entry->number = (substream)->number; __entry->stream = (substream)->stream; __assign_str(reason); ), TP_printk("pcmC%dD%d%s/sub%d: ERROR: %s", __entry->card, __entry->device, __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? "p" : "c", __entry->number, __get_str(reason)) ); TRACE_EVENT(applptr, TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_uframes_t prev, snd_pcm_uframes_t curr), TP_ARGS(substream, prev, curr), TP_STRUCT__entry( __field( unsigned int, card ) __field( unsigned int, device ) __field( unsigned int, number ) __field( unsigned int, stream ) __field( snd_pcm_uframes_t, prev ) __field( snd_pcm_uframes_t, curr ) __field( snd_pcm_uframes_t, avail ) __field( snd_pcm_uframes_t, period_size ) __field( snd_pcm_uframes_t, buffer_size ) ), TP_fast_assign( __entry->card = (substream)->pcm->card->number; __entry->device = (substream)->pcm->device; __entry->number = (substream)->number; __entry->stream = (substream)->stream; __entry->prev = (prev); __entry->curr = (curr); __entry->avail = (substream)->stream ? snd_pcm_capture_avail(substream->runtime) : snd_pcm_playback_avail(substream->runtime); __entry->period_size = (substream)->runtime->period_size; __entry->buffer_size = (substream)->runtime->buffer_size; ), TP_printk("pcmC%dD%d%s/sub%d: prev=%lu, curr=%lu, avail=%lu, period=%lu, buf=%lu", __entry->card, __entry->device, __entry->stream ? "c" : "p", __entry->number, __entry->prev, __entry->curr, __entry->avail, __entry->period_size, __entry->buffer_size ) ); #endif /* _PCM_TRACE_H */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #include <trace/define_trace.h> |
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "accessors.h"
#include "dir-item.h"

/*
 * insert a name into a directory, doing overflow properly if there is a hash
 * collision. data_size indicates how big the item inserted should be. On
 * success a struct btrfs_dir_item pointer is returned, otherwise it is
 * an ERR_PTR.
 *
 * The name is not copied into the dir item, you have to do that yourself.
*/ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, const struct btrfs_key *cpu_key, u32 data_size, const char *name, int name_len) { int ret; char *ptr; struct extent_buffer *leaf; ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size); if (ret == -EEXIST) { struct btrfs_dir_item *di; di = btrfs_match_dir_item_name(path, name, name_len); if (di) return ERR_PTR(-EEXIST); btrfs_extend_item(trans, path, data_size); } else if (ret < 0) return ERR_PTR(ret); WARN_ON(ret > 0); leaf = path->nodes[0]; ptr = btrfs_item_ptr(leaf, path->slots[0], char); ASSERT(data_size <= btrfs_item_size(leaf, path->slots[0])); ptr += btrfs_item_size(leaf, path->slots[0]) - data_size; return (struct btrfs_dir_item *)ptr; } /* * xattrs work a lot like directories, this inserts an xattr item * into the tree */ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 objectid, const char *name, u16 name_len, const void *data, u16 data_len) { int ret = 0; struct btrfs_dir_item *dir_item; unsigned long name_ptr, data_ptr; struct btrfs_key key, location; struct btrfs_disk_key disk_key; struct extent_buffer *leaf; u32 data_size; if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) return -ENOSPC; key.objectid = objectid; key.type = BTRFS_XATTR_ITEM_KEY; key.offset = btrfs_name_hash(name, name_len); data_size = sizeof(*dir_item) + name_len + data_len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name, name_len); if (IS_ERR(dir_item)) return PTR_ERR(dir_item); memset(&location, 0, sizeof(location)); leaf = path->nodes[0]; btrfs_cpu_key_to_disk(&disk_key, &location); btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_flags(leaf, dir_item, BTRFS_FT_XATTR); btrfs_set_dir_name_len(leaf, dir_item, name_len); btrfs_set_dir_transid(leaf, dir_item, trans->transid); btrfs_set_dir_data_len(leaf, dir_item, data_len); name_ptr = (unsigned long)(dir_item + 1); data_ptr = (unsigned long)((char *)name_ptr + name_len); write_extent_buffer(leaf, name, name_ptr, name_len); write_extent_buffer(leaf, data, data_ptr, data_len); return ret; } /* * insert a directory item in the tree, doing all the magic for * both indexes. 'dir' indicates which objectid to insert it into, * 'location' is the key to stuff into the directory item, 'type' is the * type of the inode we're pointing to, and 'index' is the sequence number * to use for the second index (if one is created). 
* Will return 0 or -ENOMEM */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const struct fscrypt_str *name, struct btrfs_inode *dir, const struct btrfs_key *location, u8 type, u64 index) { int ret = 0; int ret2 = 0; struct btrfs_root *root = dir->root; struct btrfs_path *path; struct btrfs_dir_item *dir_item; struct extent_buffer *leaf; unsigned long name_ptr; struct btrfs_key key; struct btrfs_disk_key disk_key; u32 data_size; key.objectid = btrfs_ino(dir); key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name->name, name->len); path = btrfs_alloc_path(); if (!path) return -ENOMEM; btrfs_cpu_key_to_disk(&disk_key, location); data_size = sizeof(*dir_item) + name->len; dir_item = insert_with_overflow(trans, root, path, &key, data_size, name->name, name->len); if (IS_ERR(dir_item)) { ret = PTR_ERR(dir_item); if (ret == -EEXIST) goto second_insert; goto out_free; } if (IS_ENCRYPTED(&dir->vfs_inode)) type |= BTRFS_FT_ENCRYPTED; leaf = path->nodes[0]; btrfs_set_dir_item_key(leaf, dir_item, &disk_key); btrfs_set_dir_flags(leaf, dir_item, type); btrfs_set_dir_data_len(leaf, dir_item, 0); btrfs_set_dir_name_len(leaf, dir_item, name->len); btrfs_set_dir_transid(leaf, dir_item, trans->transid); name_ptr = (unsigned long)(dir_item + 1); write_extent_buffer(leaf, name->name, name_ptr, name->len); second_insert: /* FIXME, use some real flag for selecting the extra index */ if (root == root->fs_info->tree_root) { ret = 0; goto out_free; } btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) return ret; if (ret2) return ret2; return 0; } static struct btrfs_dir_item *btrfs_lookup_match_dir( struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, const char *name, int name_len, int mod) { const int ins_len = (mod < 0 ? -1 : 0); const int cow = (mod != 0); int ret; ret = btrfs_search_slot(trans, root, key, path, ins_len, cow); if (ret < 0) return ERR_PTR(ret); if (ret > 0) return ERR_PTR(-ENOENT); return btrfs_match_dir_item_name(path, name, name_len); } /* * Lookup for a directory item by name. * * @trans: The transaction handle to use. Can be NULL if @mod is 0. * @root: The root of the target tree. * @path: Path to use for the search. * @dir: The inode number (objectid) of the directory. * @name: The name associated to the directory entry we are looking for. * @name_len: The length of the name. * @mod: Used to indicate if the tree search is meant for a read only * lookup, for a modification lookup or for a deletion lookup, so * its value should be 0, 1 or -1, respectively. * * Returns: NULL if the dir item does not exists, an error pointer if an error * happened, or a pointer to a dir item if a dir item exists for the given name. 
*/ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const struct fscrypt_str *name, int mod) { struct btrfs_key key; struct btrfs_dir_item *di; key.objectid = dir; key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name->name, name->len); di = btrfs_lookup_match_dir(trans, root, path, &key, name->name, name->len, mod); if (IS_ERR(di) && PTR_ERR(di) == -ENOENT) return NULL; return di; } int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir_ino, const struct fscrypt_str *name) { int ret; struct btrfs_key key; struct btrfs_dir_item *di; int data_size; struct extent_buffer *leaf; int slot; BTRFS_PATH_AUTO_FREE(path); path = btrfs_alloc_path(); if (!path) return -ENOMEM; key.objectid = dir_ino; key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name->name, name->len); di = btrfs_lookup_match_dir(NULL, root, path, &key, name->name, name->len, 0); if (IS_ERR(di)) { ret = PTR_ERR(di); /* Nothing found, we're safe */ if (ret == -ENOENT) return 0; if (ret < 0) return ret; } /* we found an item, look for our name in the item */ if (di) { /* our exact name was found */ return -EEXIST; } /* See if there is room in the item to insert this name. */ data_size = sizeof(*di) + name->len; leaf = path->nodes[0]; slot = path->slots[0]; if (data_size + btrfs_item_size(leaf, slot) + sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) { return -EOVERFLOW; } /* Plenty of insertion room. */ return 0; } /* * Lookup for a directory index item by name and index number. * * @trans: The transaction handle to use. Can be NULL if @mod is 0. * @root: The root of the target tree. * @path: Path to use for the search. * @dir: The inode number (objectid) of the directory. * @index: The index number. * @name: The name associated to the directory entry we are looking for. * @name_len: The length of the name. * @mod: Used to indicate if the tree search is meant for a read only * lookup, for a modification lookup or for a deletion lookup, so * its value should be 0, 1 or -1, respectively. * * Returns: NULL if the dir index item does not exists, an error pointer if an * error happened, or a pointer to a dir item if the dir index item exists and * matches the criteria (name and index number). */ struct btrfs_dir_item * btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, u64 index, const struct fscrypt_str *name, int mod) { struct btrfs_dir_item *di; struct btrfs_key key; key.objectid = dir; key.type = BTRFS_DIR_INDEX_KEY; key.offset = index; di = btrfs_lookup_match_dir(trans, root, path, &key, name->name, name->len, mod); if (di == ERR_PTR(-ENOENT)) return NULL; return di; } struct btrfs_dir_item * btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path, u64 dirid, const struct fscrypt_str *name) { struct btrfs_dir_item *di; struct btrfs_key key; int ret; key.objectid = dirid; key.type = BTRFS_DIR_INDEX_KEY; key.offset = 0; btrfs_for_each_slot(root, &key, &key, path, ret) { if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY) break; di = btrfs_match_dir_item_name(path, name->name, name->len); if (di) return di; } /* Adjust return code if the key was not found in the next leaf. 
*/ if (ret >= 0) ret = -ENOENT; return ERR_PTR(ret); } struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, u64 dir, const char *name, u16 name_len, int mod) { struct btrfs_key key; struct btrfs_dir_item *di; key.objectid = dir; key.type = BTRFS_XATTR_ITEM_KEY; key.offset = btrfs_name_hash(name, name_len); di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod); if (IS_ERR(di) && PTR_ERR(di) == -ENOENT) return NULL; return di; } /* * helper function to look at the directory item pointed to by 'path' * this walks through all the entries in a dir item and finds one * for a specific name. */ struct btrfs_dir_item *btrfs_match_dir_item_name(const struct btrfs_path *path, const char *name, int name_len) { struct btrfs_dir_item *dir_item; unsigned long name_ptr; u32 total_len; u32 cur = 0; u32 this_len; struct extent_buffer *leaf; leaf = path->nodes[0]; dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); total_len = btrfs_item_size(leaf, path->slots[0]); while (cur < total_len) { this_len = sizeof(*dir_item) + btrfs_dir_name_len(leaf, dir_item) + btrfs_dir_data_len(leaf, dir_item); name_ptr = (unsigned long)(dir_item + 1); if (btrfs_dir_name_len(leaf, dir_item) == name_len && memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) return dir_item; cur += this_len; dir_item = (struct btrfs_dir_item *)((char *)dir_item + this_len); } return NULL; } /* * given a pointer into a directory item, delete it. This * handles items that have more than one entry in them. */ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_path *path, const struct btrfs_dir_item *di) { struct extent_buffer *leaf; u32 sub_item_len; u32 item_len; int ret = 0; leaf = path->nodes[0]; sub_item_len = sizeof(*di) + btrfs_dir_name_len(leaf, di) + btrfs_dir_data_len(leaf, di); item_len = btrfs_item_size(leaf, path->slots[0]); if (sub_item_len == item_len) { ret = btrfs_del_item(trans, root, path); } else { /* MARKER */ unsigned long ptr = (unsigned long)di; unsigned long start; start = btrfs_item_ptr_offset(leaf, path->slots[0]); memmove_extent_buffer(leaf, ptr, ptr + sub_item_len, item_len - (ptr + sub_item_len - start)); btrfs_truncate_item(trans, path, item_len - sub_item_len, 1); } return ret; } |
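/*
 * Illustrative usage (not part of the original file): a sketch of a
 * read-only directory-entry lookup built from the helpers above, roughly
 * what the inode lookup path does before resolving the target key. The
 * function name is hypothetical and error handling is abbreviated.
 */
static int example_lookup_entry(struct btrfs_root *root, u64 dir_ino,
				const struct fscrypt_str *name,
				struct btrfs_key *location)
{
	BTRFS_PATH_AUTO_FREE(path);	/* freed automatically on return */
	struct btrfs_dir_item *di;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* mod == 0: read-only search, so no transaction handle is needed. */
	di = btrfs_lookup_dir_item(NULL, root, path, dir_ino, name, 0);
	if (IS_ERR(di))
		return PTR_ERR(di);
	if (!di)
		return -ENOENT;

	/* Copy the key of the inode this entry points at. */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	return 0;
}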
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/configfs.h> #include "tcp.h" #include "nodemanager.h" #include "heartbeat.h" #include "masklog.h" #include "sys.h" /* for now we operate under the assertion that there can be only one * cluster active at a time. Changing this will require trickling * cluster references throughout where nodes are looked up */ struct o2nm_cluster *o2nm_single_cluster = NULL; static const char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = { "reset", /* O2NM_FENCE_RESET */ "panic", /* O2NM_FENCE_PANIC */ }; static inline void o2nm_lock_subsystem(void); static inline void o2nm_unlock_subsystem(void); struct o2nm_node *o2nm_get_node_by_num(u8 node_num) { struct o2nm_node *node = NULL; if (node_num >= O2NM_MAX_NODES || o2nm_single_cluster == NULL) goto out; read_lock(&o2nm_single_cluster->cl_nodes_lock); node = o2nm_single_cluster->cl_nodes[node_num]; if (node) config_item_get(&node->nd_item); read_unlock(&o2nm_single_cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_num); int o2nm_configured_node_map(unsigned long *map, unsigned bytes) { struct o2nm_cluster *cluster = o2nm_single_cluster; BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap))); if (cluster == NULL) return -EINVAL; read_lock(&cluster->cl_nodes_lock); bitmap_copy(map, cluster->cl_nodes_bitmap, O2NM_MAX_NODES); read_unlock(&cluster->cl_nodes_lock); return 0; } EXPORT_SYMBOL_GPL(o2nm_configured_node_map); static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster, __be32 ip_needle, struct rb_node ***ret_p, struct rb_node **ret_parent) { struct rb_node **p = &cluster->cl_node_ip_tree.rb_node; struct rb_node *parent = NULL; struct o2nm_node *node, *ret = NULL; while (*p) { int cmp; parent = *p; node = rb_entry(parent, struct o2nm_node, nd_ip_node); cmp = memcmp(&ip_needle, &node->nd_ipv4_address, sizeof(ip_needle)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { ret = node; break; } } if (ret_p != NULL) *ret_p = p; if (ret_parent != NULL) *ret_parent = parent; return ret; } struct o2nm_node *o2nm_get_node_by_ip(__be32 addr) { struct o2nm_node *node = NULL; struct o2nm_cluster *cluster = o2nm_single_cluster; if (cluster == NULL) goto out; read_lock(&cluster->cl_nodes_lock); node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL); if (node) config_item_get(&node->nd_item); read_unlock(&cluster->cl_nodes_lock); out: return node; } EXPORT_SYMBOL_GPL(o2nm_get_node_by_ip); void o2nm_node_put(struct o2nm_node *node) { config_item_put(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_put); void o2nm_node_get(struct o2nm_node *node) { config_item_get(&node->nd_item); } EXPORT_SYMBOL_GPL(o2nm_node_get); u8 o2nm_this_node(void) { u8 node_num = O2NM_MAX_NODES; if (o2nm_single_cluster && o2nm_single_cluster->cl_has_local) node_num = o2nm_single_cluster->cl_local_node; return node_num; } EXPORT_SYMBOL_GPL(o2nm_this_node); /* node configfs bits */ static struct o2nm_cluster *to_o2nm_cluster(struct config_item *item) { return item ? container_of(to_config_group(item), struct o2nm_cluster, cl_group) : NULL; } static struct o2nm_node *to_o2nm_node(struct config_item *item) { return item ? 
container_of(item, struct o2nm_node, nd_item) : NULL; } static void o2nm_node_release(struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); kfree(node); } static ssize_t o2nm_node_num_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_num); } static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node) { /* through the first node_set .parent * mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */ if (node->nd_item.ci_parent) return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent); else return NULL; } enum { O2NM_NODE_ATTR_NUM = 0, O2NM_NODE_ATTR_PORT, O2NM_NODE_ATTR_ADDRESS, }; static ssize_t o2nm_node_num_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; int ret = 0; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp >= O2NM_MAX_NODES) return -ERANGE; /* once we're in the cl_nodes tree networking can look us up by * node number and try to use our address and port attributes * to connect to this node.. make sure that they've been set * before writing the node attribute? */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } write_lock(&cluster->cl_nodes_lock); if (cluster->cl_nodes[tmp]) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes)) ret = -EBUSY; else { cluster->cl_nodes[tmp] = node; node->nd_num = tmp; set_bit(tmp, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; return count; } static ssize_t o2nm_node_ipv4_port_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", ntohs(to_o2nm_node(item)->nd_ipv4_port)); } static ssize_t o2nm_node_ipv4_port_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u16)-1) return -ERANGE; if (test_and_set_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EBUSY; node->nd_ipv4_port = htons(tmp); return count; } static ssize_t o2nm_node_ipv4_address_show(struct config_item *item, char *page) { return sprintf(page, "%pI4\n", &to_o2nm_node(item)->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; int ret, i; struct rb_node **p, *parent; unsigned int octets[4]; __be32 ipv4_addr = 0; ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2], &octets[1], &octets[0]); if (ret != 4) return -EINVAL; for (i = 0; i < ARRAY_SIZE(octets); i++) { if (octets[i] > 255) return -ERANGE; be32_add_cpu(&ipv4_addr, octets[i] << (i * 8)); } o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { o2nm_unlock_subsystem(); return -EINVAL; } ret = 0; write_lock(&cluster->cl_nodes_lock); if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent)) ret = -EEXIST; else if (test_and_set_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes)) ret = -EBUSY; else { 
rb_link_node(&node->nd_ip_node, parent, p); rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree); } write_unlock(&cluster->cl_nodes_lock); o2nm_unlock_subsystem(); if (ret) return ret; memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr)); return count; } static ssize_t o2nm_node_local_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_o2nm_node(item)->nd_local); } static ssize_t o2nm_node_local_store(struct config_item *item, const char *page, size_t count) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster; unsigned long tmp; char *p = (char *)page; ssize_t ret; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; tmp = !!tmp; /* boolean of whether this node wants to be local */ /* setting local turns on networking rx for now so we require having * set everything else first */ if (!test_bit(O2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_NUM, &node->nd_set_attributes) || !test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes)) return -EINVAL; /* XXX */ o2nm_lock_subsystem(); cluster = to_o2nm_cluster_from_node(node); if (!cluster) { ret = -EINVAL; goto out; } /* the only failure case is trying to set a new local node * when a different one is already set */ if (tmp && tmp == cluster->cl_has_local && cluster->cl_local_node != node->nd_num) { ret = -EBUSY; goto out; } /* bring up the rx thread if we're setting the new local node. */ if (tmp && !cluster->cl_has_local) { ret = o2net_start_listening(node); if (ret) goto out; } if (!tmp && cluster->cl_has_local && cluster->cl_local_node == node->nd_num) { o2net_stop_listening(node); cluster->cl_local_node = O2NM_INVALID_NODE_NUM; } node->nd_local = tmp; if (node->nd_local) { cluster->cl_has_local = tmp; cluster->cl_local_node = node->nd_num; } ret = count; out: o2nm_unlock_subsystem(); return ret; } CONFIGFS_ATTR(o2nm_node_, num); CONFIGFS_ATTR(o2nm_node_, ipv4_port); CONFIGFS_ATTR(o2nm_node_, ipv4_address); CONFIGFS_ATTR(o2nm_node_, local); static struct configfs_attribute *o2nm_node_attrs[] = { &o2nm_node_attr_num, &o2nm_node_attr_ipv4_port, &o2nm_node_attr_ipv4_address, &o2nm_node_attr_local, NULL, }; static struct configfs_item_operations o2nm_node_item_ops = { .release = o2nm_node_release, }; static const struct config_item_type o2nm_node_type = { .ct_item_ops = &o2nm_node_item_ops, .ct_attrs = o2nm_node_attrs, .ct_owner = THIS_MODULE, }; /* node set */ struct o2nm_node_group { struct config_group ns_group; /* some stuff? */ }; #if 0 static struct o2nm_node_group *to_o2nm_node_group(struct config_group *group) { return group ? 
container_of(group, struct o2nm_node_group, ns_group) : NULL; } #endif static ssize_t o2nm_cluster_attr_write(const char *page, ssize_t count, unsigned int *val) { unsigned long tmp; char *p = (char *)page; tmp = simple_strtoul(p, &p, 0); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp == 0) return -EINVAL; if (tmp >= (u32)-1) return -ERANGE; *val = tmp; return count; } static ssize_t o2nm_cluster_idle_timeout_ms_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_idle_timeout_ms); } static ssize_t o2nm_cluster_idle_timeout_ms_store(struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_idle_timeout_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change idle timeout after " "the first peer has agreed to it." " %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val <= cluster->cl_keepalive_delay_ms) { mlog(ML_NOTICE, "o2net: idle timeout must be larger " "than keepalive delay\n"); ret = -EINVAL; } else { cluster->cl_idle_timeout_ms = val; } } return ret; } static ssize_t o2nm_cluster_keepalive_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_keepalive_delay_ms); } static ssize_t o2nm_cluster_keepalive_delay_ms_store( struct config_item *item, const char *page, size_t count) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret; unsigned int val; ret = o2nm_cluster_attr_write(page, count, &val); if (ret > 0) { if (cluster->cl_keepalive_delay_ms != val && o2net_num_connected_peers()) { mlog(ML_NOTICE, "o2net: cannot change keepalive delay after" " the first peer has agreed to it." 
" %d connected peers\n", o2net_num_connected_peers()); ret = -EINVAL; } else if (val >= cluster->cl_idle_timeout_ms) { mlog(ML_NOTICE, "o2net: keepalive delay must be " "smaller than idle timeout\n"); ret = -EINVAL; } else { cluster->cl_keepalive_delay_ms = val; } } return ret; } static ssize_t o2nm_cluster_reconnect_delay_ms_show( struct config_item *item, char *page) { return sprintf(page, "%u\n", to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_reconnect_delay_ms_store( struct config_item *item, const char *page, size_t count) { return o2nm_cluster_attr_write(page, count, &to_o2nm_cluster(item)->cl_reconnect_delay_ms); } static ssize_t o2nm_cluster_fence_method_show( struct config_item *item, char *page) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); ssize_t ret = 0; if (cluster) ret = sprintf(page, "%s\n", o2nm_fence_method_desc[cluster->cl_fence_method]); return ret; } static ssize_t o2nm_cluster_fence_method_store( struct config_item *item, const char *page, size_t count) { unsigned int i; if (page[count - 1] != '\n') goto bail; for (i = 0; i < O2NM_FENCE_METHODS; ++i) { if (count != strlen(o2nm_fence_method_desc[i]) + 1) continue; if (strncasecmp(page, o2nm_fence_method_desc[i], count - 1)) continue; if (to_o2nm_cluster(item)->cl_fence_method != i) { printk(KERN_INFO "ocfs2: Changing fence method to %s\n", o2nm_fence_method_desc[i]); to_o2nm_cluster(item)->cl_fence_method = i; } return count; } bail: return -EINVAL; } CONFIGFS_ATTR(o2nm_cluster_, idle_timeout_ms); CONFIGFS_ATTR(o2nm_cluster_, keepalive_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, reconnect_delay_ms); CONFIGFS_ATTR(o2nm_cluster_, fence_method); static struct configfs_attribute *o2nm_cluster_attrs[] = { &o2nm_cluster_attr_idle_timeout_ms, &o2nm_cluster_attr_keepalive_delay_ms, &o2nm_cluster_attr_reconnect_delay_ms, &o2nm_cluster_attr_fence_method, NULL, }; static struct config_item *o2nm_node_group_make_item(struct config_group *group, const char *name) { struct o2nm_node *node = NULL; if (strlen(name) > O2NM_MAX_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); node = kzalloc(sizeof(struct o2nm_node), GFP_KERNEL); if (node == NULL) return ERR_PTR(-ENOMEM); strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */ config_item_init_type_name(&node->nd_item, name, &o2nm_node_type); spin_lock_init(&node->nd_lock); mlog(ML_CLUSTER, "o2nm: Registering node %s\n", name); return &node->nd_item; } static void o2nm_node_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_node *node = to_o2nm_node(item); struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent); if (cluster->cl_nodes[node->nd_num] == node) { o2net_disconnect_node(node); if (cluster->cl_has_local && (cluster->cl_local_node == node->nd_num)) { cluster->cl_has_local = 0; cluster->cl_local_node = O2NM_INVALID_NODE_NUM; o2net_stop_listening(node); } } /* XXX call into net to stop this node from trading messages */ write_lock(&cluster->cl_nodes_lock); /* XXX sloppy */ if (node->nd_ipv4_address) rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree); /* nd_num might be 0 if the node number hasn't been set.. 
*/ if (cluster->cl_nodes[node->nd_num] == node) { cluster->cl_nodes[node->nd_num] = NULL; clear_bit(node->nd_num, cluster->cl_nodes_bitmap); } write_unlock(&cluster->cl_nodes_lock); mlog(ML_CLUSTER, "o2nm: Unregistered node %s\n", config_item_name(&node->nd_item)); config_item_put(item); } static struct configfs_group_operations o2nm_node_group_group_ops = { .make_item = o2nm_node_group_make_item, .drop_item = o2nm_node_group_drop_item, }; static const struct config_item_type o2nm_node_group_type = { .ct_group_ops = &o2nm_node_group_group_ops, .ct_owner = THIS_MODULE, }; /* cluster */ static void o2nm_cluster_release(struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); kfree(cluster); } static struct configfs_item_operations o2nm_cluster_item_ops = { .release = o2nm_cluster_release, }; static const struct config_item_type o2nm_cluster_type = { .ct_item_ops = &o2nm_cluster_item_ops, .ct_attrs = o2nm_cluster_attrs, .ct_owner = THIS_MODULE, }; /* cluster set */ struct o2nm_cluster_group { struct configfs_subsystem cs_subsys; /* some stuff? */ }; #if 0 static struct o2nm_cluster_group *to_o2nm_cluster_group(struct config_group *group) { return group ? container_of(to_configfs_subsystem(group), struct o2nm_cluster_group, cs_subsys) : NULL; } #endif static struct config_group *o2nm_cluster_group_make_group(struct config_group *group, const char *name) { struct o2nm_cluster *cluster = NULL; struct o2nm_node_group *ns = NULL; struct config_group *o2hb_group = NULL, *ret = NULL; /* this runs under the parent dir's i_rwsem; there can be only * one caller in here at a time */ if (o2nm_single_cluster) return ERR_PTR(-ENOSPC); cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL); ns = kzalloc(sizeof(struct o2nm_node_group), GFP_KERNEL); o2hb_group = o2hb_alloc_hb_set(); if (cluster == NULL || ns == NULL || o2hb_group == NULL) goto out; config_group_init_type_name(&cluster->cl_group, name, &o2nm_cluster_type); configfs_add_default_group(&ns->ns_group, &cluster->cl_group); config_group_init_type_name(&ns->ns_group, "node", &o2nm_node_group_type); configfs_add_default_group(o2hb_group, &cluster->cl_group); rwlock_init(&cluster->cl_nodes_lock); cluster->cl_node_ip_tree = RB_ROOT; cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT; cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT; cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT; cluster->cl_fence_method = O2NM_FENCE_RESET; ret = &cluster->cl_group; o2nm_single_cluster = cluster; out: if (ret == NULL) { kfree(cluster); kfree(ns); o2hb_free_hb_set(o2hb_group); ret = ERR_PTR(-ENOMEM); } return ret; } static void o2nm_cluster_group_drop_item(struct config_group *group, struct config_item *item) { struct o2nm_cluster *cluster = to_o2nm_cluster(item); BUG_ON(o2nm_single_cluster != cluster); o2nm_single_cluster = NULL; configfs_remove_default_groups(&cluster->cl_group); config_item_put(item); } static struct configfs_group_operations o2nm_cluster_group_group_ops = { .make_group = o2nm_cluster_group_make_group, .drop_item = o2nm_cluster_group_drop_item, }; static const struct config_item_type o2nm_cluster_group_type = { .ct_group_ops = &o2nm_cluster_group_group_ops, .ct_owner = THIS_MODULE, }; static struct o2nm_cluster_group o2nm_cluster_group = { .cs_subsys = { .su_group = { .cg_item = { .ci_namebuf = "cluster", .ci_type = &o2nm_cluster_group_type, }, }, }, }; static inline void o2nm_lock_subsystem(void) { mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex); } static inline void 
o2nm_unlock_subsystem(void) { mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex); } int o2nm_depend_item(struct config_item *item) { return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item); } void o2nm_undepend_item(struct config_item *item) { configfs_undepend_item(item); } int o2nm_depend_this_node(void) { int ret = 0; struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); if (!local_node) { ret = -EINVAL; goto out; } ret = o2nm_depend_item(&local_node->nd_item); o2nm_node_put(local_node); out: return ret; } void o2nm_undepend_this_node(void) { struct o2nm_node *local_node; local_node = o2nm_get_node_by_num(o2nm_this_node()); BUG_ON(!local_node); o2nm_undepend_item(&local_node->nd_item); o2nm_node_put(local_node); } static void __exit exit_o2nm(void) { /* XXX sync with hb callbacks and shut down hb? */ o2net_unregister_hb_callbacks(); configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); o2cb_sys_shutdown(); o2net_exit(); o2hb_exit(); } static int __init init_o2nm(void) { int ret; o2hb_init(); ret = o2net_init(); if (ret) goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) goto out_o2net; config_group_init(&o2nm_cluster_group.cs_subsys.su_group); mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex); ret = configfs_register_subsystem(&o2nm_cluster_group.cs_subsys); if (ret) { printk(KERN_ERR "nodemanager: Registration returned %d\n", ret); goto out_callbacks; } ret = o2cb_sys_init(); if (!ret) goto out; configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys); out_callbacks: o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); out_o2hb: o2hb_exit(); out: return ret; } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 cluster management"); module_init(init_o2nm) module_exit(exit_o2nm)
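/*
 * Editor's sketch (not part of the original nodemanager.c): one way a
 * cluster-stack consumer could walk the configured node map using the
 * helpers exported above. example_dump_nodes() is a hypothetical name;
 * o2nm_configured_node_map(), o2nm_get_node_by_num() and o2nm_node_put()
 * are the functions defined in this file. Note that the lookup takes a
 * reference on the node's config_item, so every successful lookup must be
 * paired with o2nm_node_put().
 */
static void __maybe_unused example_dump_nodes(void)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned int i;

	if (o2nm_configured_node_map(map, sizeof(map)))
		return;	/* no cluster configured yet */

	for_each_set_bit(i, map, O2NM_MAX_NODES) {
		struct o2nm_node *node = o2nm_get_node_by_num(i);

		if (!node)
			continue;
		printk(KERN_INFO "o2nm: node %u is \"%s\", port %u\n",
		       i, node->nd_name, ntohs(node->nd_ipv4_port));
		o2nm_node_put(node);	/* drop the reference taken by the lookup */
	}
}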
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FORTIFY_STRING_H_ #define _LINUX_FORTIFY_STRING_H_ #include <linux/bitfield.h> #include <linux/bug.h> #include <linux/const.h> #include <linux/limits.h> #define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable #define __RENAME(x) __asm__(#x) #define 
FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r) #define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r) #define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \ FIELD_PREP(GENMASK(7, 1), func)) /* Overridden by KUnit tests. */ #ifndef fortify_panic # define fortify_panic(func, write, avail, size, retfail) \ __fortify_panic(FORTIFY_REASON(func, write), avail, size) #endif #ifndef fortify_warn_once # define fortify_warn_once(x...) WARN_ONCE(x) #endif #define FORTIFY_READ 0 #define FORTIFY_WRITE 1 #define EACH_FORTIFY_FUNC(macro) \ macro(strncpy), \ macro(strnlen), \ macro(strlen), \ macro(strscpy), \ macro(strlcat), \ macro(strcat), \ macro(strncat), \ macro(memset), \ macro(memcpy), \ macro(memmove), \ macro(memscan), \ macro(memcmp), \ macro(memchr), \ macro(memchr_inv), \ macro(kmemdup), \ macro(strcpy), \ macro(UNKNOWN), #define MAKE_FORTIFY_FUNC(func) FORTIFY_FUNC_##func enum fortify_func { EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC) }; void __fortify_report(const u8 reason, const size_t avail, const size_t size); void __fortify_panic(const u8 reason, const size_t avail, const size_t size) __cold __noreturn; void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)"); void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)"); void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?"); void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)"); void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?"); #define __compiletime_strlen(p) \ ({ \ char *__p = (char *)(p); \ size_t __ret = SIZE_MAX; \ const size_t __p_size = __member_size(p); \ if (__p_size != SIZE_MAX && \ __builtin_constant_p(*__p)) { \ size_t __p_len = __p_size - 1; \ if (__builtin_constant_p(__p[__p_len]) && \ __p[__p_len] == '\0') \ __ret = __builtin_strlen(__p); \ } \ __ret; \ }) #if defined(__SANITIZE_ADDRESS__) #if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY) extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); #elif defined(CONFIG_KASAN_GENERIC) extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset); extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy); #else /* CONFIG_KASAN_SW_TAGS */ extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset); extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove); extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy); #endif extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); extern char *__underlying_strcpy(char *p, const char *q) 
__RENAME(strcpy); extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); #else #if defined(__SANITIZE_MEMORY__) /* * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the * corresponding __msan_XXX functions. */ #include <linux/kmsan_string.h> #define __underlying_memcpy __msan_memcpy #define __underlying_memmove __msan_memmove #define __underlying_memset __msan_memset #else #define __underlying_memcpy __builtin_memcpy #define __underlying_memmove __builtin_memmove #define __underlying_memset __builtin_memset #endif #define __underlying_memchr __builtin_memchr #define __underlying_memcmp __builtin_memcmp #define __underlying_strcat __builtin_strcat #define __underlying_strcpy __builtin_strcpy #define __underlying_strlen __builtin_strlen #define __underlying_strncat __builtin_strncat #define __underlying_strncpy __builtin_strncpy #endif /** * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking * * @dst: Destination memory address to write to * @src: Source memory address to read from * @bytes: How many bytes to write to @dst from @src * @justification: Free-form text or comment describing why the use is needed * * This should be used for corner cases where the compiler cannot do the * right thing, or during transitions between APIs, etc. It should be used * very rarely, and includes a place for justification detailing where bounds * checking has happened, and why existing solutions cannot be employed. */ #define unsafe_memcpy(dst, src, bytes, justification) \ __underlying_memcpy(dst, src, bytes) /* * Clang's use of __builtin_*object_size() within inlines needs hinting via * __pass_*object_size(). The preference is to only ever use type 1 (member * size, rather than struct size), but there remain some stragglers using * type 0 that will be converted in the future. */ #if __has_builtin(__builtin_dynamic_object_size) #define POS __pass_dynamic_object_size(1) #define POS0 __pass_dynamic_object_size(0) #else #define POS __pass_object_size(1) #define POS0 __pass_object_size(0) #endif #define __compiletime_lessthan(bounds, length) ( \ __builtin_constant_p((bounds) < (length)) && \ (bounds) < (length) \ ) /** * strncpy - Copy a string to memory with non-guaranteed NUL padding * * @p: pointer to destination of copy * @q: pointer to NUL-terminated source string to copy * @size: bytes to write at @p * * If strlen(@q) >= @size, the copy of @q will stop after @size bytes, * and @p will NOT be NUL-terminated * * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes * will be written to @p until @size total bytes have been written. * * Do not use this function. While FORTIFY_SOURCE tries to avoid * over-reads of @q, it cannot defend against writing unterminated * results to @p. Using strncpy() remains ambiguous and fragile. 
* Instead, please choose an alternative, so that the expectation * of @p's contents is unambiguous: * * +--------------------+--------------------+------------+ * | **p** needs to be: | padded to **size** | not padded | * +====================+====================+============+ * | NUL-terminated | strscpy_pad() | strscpy() | * +--------------------+--------------------+------------+ * | not NUL-terminated | strtomem_pad() | strtomem() | * +--------------------+--------------------+------------+ * * Note strscpy*()'s differing return values for detecting truncation, * and strtomem*()'s expectation that the destination is marked with * __nonstring when it is a character array. * */ __FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3) char *strncpy(char * const POS p, const char *q, __kernel_size_t size) { const size_t p_size = __member_size(p); if (__compiletime_lessthan(p_size, size)) __write_overflow(); if (p_size < size) fortify_panic(FORTIFY_FUNC_strncpy, FORTIFY_WRITE, p_size, size, p); return __underlying_strncpy(p, q, size); } extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); /** * strnlen - Return bounded count of characters in a NUL-terminated string * * @p: pointer to NUL-terminated string to count. * @maxlen: maximum number of characters to count. * * Returns number of characters in @p (NOT including the final NUL), or * @maxlen, if no NUL has been found up to there. * */ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen) { const size_t p_size = __member_size(p); const size_t p_len = __compiletime_strlen(p); size_t ret; /* We can take compile-time actions when maxlen is const. */ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) { /* If p is const, we can use its compile-time-known len. */ if (maxlen >= p_size) return p_len; } /* Do not check characters beyond the end of p. */ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size); if (p_size <= ret && maxlen != ret) fortify_panic(FORTIFY_FUNC_strnlen, FORTIFY_READ, p_size, ret + 1, ret); return ret; } /* * Defined after fortified strnlen to reuse it. However, it must still be * possible for strlen() to be used on compile-time strings for use in * static initializers (i.e. as a constant expression). */ /** * strlen - Return count of characters in a NUL-terminated string * * @p: pointer to NUL-terminated string to count. * * Do not use this function unless the string length is known at * compile-time. When @p is unterminated, this function may crash * or return unexpected counts that could lead to memory content * exposures. Prefer strnlen(). * * Returns number of characters in @p (NOT including the final NUL). * */ #define strlen(p) \ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \ __builtin_strlen(p), __fortify_strlen(p)) __FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1) __kernel_size_t __fortify_strlen(const char * const POS p) { const size_t p_size = __member_size(p); __kernel_size_t ret; /* Give up if we don't know how large p is. */ if (p_size == SIZE_MAX) return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(FORTIFY_FUNC_strlen, FORTIFY_READ, p_size, ret + 1, ret); return ret; } /* Defined after fortified strnlen() to reuse it. */ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(sized_strscpy); __FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size) { /* Use string size rather than possible enclosing struct size. 
*/ const size_t p_size = __member_size(p); const size_t q_size = __member_size(q); size_t len; /* If we cannot get size of p and q default to call strscpy. */ if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __real_strscpy(p, q, size); /* * If size can be known at compile time and is greater than * p_size, generate a compile time write overflow error. */ if (__compiletime_lessthan(p_size, size)) __write_overflow(); /* Short-circuit for compile-time known-safe lengths. */ if (__compiletime_lessthan(p_size, SIZE_MAX)) { len = __compiletime_strlen(q); if (len < SIZE_MAX && __compiletime_lessthan(len, size)) { __underlying_memcpy(p, q, len + 1); return len; } } /* * This call protects from read overflow, because len will default to q * length if it is smaller than size. */ len = strnlen(q, size); /* * If len equals size, we will copy only size bytes which leads to * -E2BIG being returned. * Otherwise we will copy len + 1 because of the final '\0'. */ len = len == size ? size : len + 1; /* * Generate a runtime write overflow error if len is greater than * p_size. */ if (p_size < len) fortify_panic(FORTIFY_FUNC_strscpy, FORTIFY_WRITE, p_size, len, -E2BIG); /* * We can now safely call vanilla strscpy because we are protected from: * 1. Read overflow thanks to call to strnlen(). * 2. Write overflow thanks to above ifs. */ return __real_strscpy(p, q, len); } /* Defined after fortified strlen() to reuse it. */ extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat); /** * strlcat - Append a string to an existing string * * @p: pointer to %NUL-terminated string to append to * @q: pointer to %NUL-terminated string to append from * @avail: Maximum bytes available in @p * * Appends %NUL-terminated string @q after the %NUL-terminated * string at @p, but will not write beyond @avail bytes total, * potentially truncating the copy from @q. @p will stay * %NUL-terminated only if a %NUL already existed within * the @avail bytes of @p. If so, the resulting number of * bytes copied from @q will be at most "@avail - strlen(@p) - 1". * * Do not use this function. While FORTIFY_SOURCE tries to avoid * read and write overflows, this is only possible when the sizes * of @p and @q are known to the compiler. Prefer building the * string with formatting, via scnprintf(), seq_buf, or similar. * * Returns total bytes that _would_ have been contained by @p * regardless of truncation, similar to snprintf(). If return * value is >= @avail, the string has been truncated. * */ __FORTIFY_INLINE size_t strlcat(char * const POS p, const char * const POS q, size_t avail) { const size_t p_size = __member_size(p); const size_t q_size = __member_size(q); size_t p_len, copy_len; size_t actual, wanted; /* Give up immediately if both buffer sizes are unknown. */ if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __real_strlcat(p, q, avail); p_len = strnlen(p, avail); copy_len = strlen(q); wanted = actual = p_len + copy_len; /* Cannot append any more: report truncation. */ if (avail <= p_len) return wanted; /* Give up if string is already overflowed. */ if (p_size <= p_len) fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_READ, p_size, p_len + 1, wanted); if (actual >= avail) { copy_len = avail - p_len - 1; actual = p_len + copy_len; } /* Give up if copy will overflow. */ if (p_size <= actual) fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_WRITE, p_size, actual + 1, wanted); __underlying_memcpy(p + p_len, q, copy_len); p[actual] = '\0'; return wanted; } /* Defined after fortified strlcat() to reuse it. 
*/ /** * strcat - Append a string to an existing string * * @p: pointer to NUL-terminated string to append to * @q: pointer to NUL-terminated source string to append from * * Do not use this function. While FORTIFY_SOURCE tries to avoid * read and write overflows, this is only possible when the * destination buffer size is known to the compiler. Prefer * building the string with formatting, via scnprintf() or similar. * At the very least, use strncat(). * * Returns @p. * */ __FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2) char *strcat(char * const POS p, const char *q) { const size_t p_size = __member_size(p); const size_t wanted = strlcat(p, q, p_size); if (p_size <= wanted) fortify_panic(FORTIFY_FUNC_strcat, FORTIFY_WRITE, p_size, wanted + 1, p); return p; } /** * strncat - Append a string to an existing string * * @p: pointer to NUL-terminated string to append to * @q: pointer to source string to append from * @count: Maximum bytes to read from @q * * Appends at most @count bytes from @q (stopping at the first * NUL byte) after the NUL-terminated string at @p. @p will be * NUL-terminated. * * Do not use this function. While FORTIFY_SOURCE tries to avoid * read and write overflows, this is only possible when the sizes * of @p and @q are known to the compiler. Prefer building the * string with formatting, via scnprintf() or similar. * * Returns @p. * */ /* Defined after fortified strlen() and strnlen() to reuse them. */ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3) char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count) { const size_t p_size = __member_size(p); const size_t q_size = __member_size(q); size_t p_len, copy_len, total; if (p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); total = p_len + copy_len + 1; if (p_size < total) fortify_panic(FORTIFY_FUNC_strncat, FORTIFY_WRITE, p_size, total, p); __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } __FORTIFY_INLINE bool fortify_memset_chk(__kernel_size_t size, const size_t p_size, const size_t p_size_field) { if (__builtin_constant_p(size)) { /* * Length argument is a constant expression, so we * can perform compile-time bounds checking where * buffer sizes are also known at compile time. */ /* Error when size is larger than enclosing struct. */ if (__compiletime_lessthan(p_size_field, p_size) && __compiletime_lessthan(p_size, size)) __write_overflow(); /* Warn when write size is larger than dest field. */ if (__compiletime_lessthan(p_size_field, size)) __write_overflow_field(p_size_field, size); } /* * At this point, length argument may not be a constant expression, * so run-time bounds checking can be done where buffer sizes are * known. (This is not an "else" because the above checks may only * be compile-time warnings, and we want to still warn for run-time * overflows.) */ /* * Always stop accesses beyond the struct that contains the * field, when the buffer's remaining size is known. * (The SIZE_MAX test is to optimize away checks where the buffer * lengths are unknown.) 
*/ if (p_size != SIZE_MAX && p_size < size) fortify_panic(FORTIFY_FUNC_memset, FORTIFY_WRITE, p_size, size, true); return false; } #define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \ size_t __fortify_size = (size_t)(size); \ fortify_memset_chk(__fortify_size, p_size, p_size_field), \ __underlying_memset(p, c, __fortify_size); \ }) /* * __struct_size() vs __member_size() must be captured here to avoid * evaluating argument side-effects further into the macro layers. */ #ifndef CONFIG_KMSAN #define memset(p, c, s) __fortify_memset_chk(p, c, s, \ __struct_size(p), __member_size(p)) #endif /* * To make sure the compiler can enforce protection against buffer overflows, * memcpy(), memmove(), and memset() must not be used beyond individual * struct members. If you need to copy across multiple members, please use * struct_group() to create a named mirror of an anonymous struct union. * (e.g. see struct sk_buff.) Read overflow checking is currently only * done when a write overflow is also present, or when building with W=1. * * Mitigation coverage matrix * Bounds checking at: * +-------+-------+-------+-------+ * | Compile time | Run time | * memcpy() argument sizes: | write | read | write | read | * dest source length +-------+-------+-------+-------+ * memcpy(known, known, constant) | y | y | n/a | n/a | * memcpy(known, unknown, constant) | y | n | n/a | V | * memcpy(known, known, dynamic) | n | n | B | B | * memcpy(known, unknown, dynamic) | n | n | B | V | * memcpy(unknown, known, constant) | n | y | V | n/a | * memcpy(unknown, unknown, constant) | n | n | V | V | * memcpy(unknown, known, dynamic) | n | n | V | B | * memcpy(unknown, unknown, dynamic) | n | n | V | V | * +-------+-------+-------+-------+ * * y = perform deterministic compile-time bounds checking * n = cannot perform deterministic compile-time bounds checking * n/a = no run-time bounds checking needed since compile-time deterministic * B = can perform run-time bounds checking (currently unimplemented) * V = vulnerable to run-time overflow (will need refactoring to solve) * */ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, const size_t p_size, const size_t q_size, const size_t p_size_field, const size_t q_size_field, const u8 func) { if (__builtin_constant_p(size)) { /* * Length argument is a constant expression, so we * can perform compile-time bounds checking where * buffer sizes are also known at compile time. */ /* Error when size is larger than enclosing struct. */ if (__compiletime_lessthan(p_size_field, p_size) && __compiletime_lessthan(p_size, size)) __write_overflow(); if (__compiletime_lessthan(q_size_field, q_size) && __compiletime_lessthan(q_size, size)) __read_overflow2(); /* Warn when write size argument larger than dest field. */ if (__compiletime_lessthan(p_size_field, size)) __write_overflow_field(p_size_field, size); /* * Warn for source field over-read when building with W=1 * or when an over-write happened, so both can be fixed at * the same time. */ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) || __compiletime_lessthan(p_size_field, size)) && __compiletime_lessthan(q_size_field, size)) __read_overflow2_field(q_size_field, size); } /* * At this point, length argument may not be a constant expression, * so run-time bounds checking can be done where buffer sizes are * known. (This is not an "else" because the above checks may only * be compile-time warnings, and we want to still warn for run-time * overflows.) 
*/ /* * Always stop accesses beyond the struct that contains the * field, when the buffer's remaining size is known. * (The SIZE_MAX test is to optimize away checks where the buffer * lengths are unknown.) */ if (p_size != SIZE_MAX && p_size < size) fortify_panic(func, FORTIFY_WRITE, p_size, size, true); else if (q_size != SIZE_MAX && q_size < size) fortify_panic(func, FORTIFY_READ, q_size, size, true); /* * Warn when writing beyond destination field size. * * Note the implementation of __builtin_*object_size() behaves * like sizeof() when not directly referencing a flexible * array member, which means there will be many bounds checks * that will appear at run-time, without a way for them to be * detected at compile-time (as can be done when the destination * is specifically the flexible array member). * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832 */ if (p_size_field != SIZE_MAX && p_size != p_size_field && p_size_field < size) return true; return false; } /* * To work around what seems to be an optimizer bug, the macro arguments * need to have const copies or the values end up changed by the time they * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture * __bos() results in const temp vars") for more details. */ #define __fortify_memcpy_chk(p, q, size, p_size, q_size, \ p_size_field, q_size_field, op) ({ \ const size_t __fortify_size = (size_t)(size); \ const size_t __p_size = (p_size); \ const size_t __q_size = (q_size); \ const size_t __p_size_field = (p_size_field); \ const size_t __q_size_field = (q_size_field); \ /* Keep a mutable version of the size for the final copy. */ \ size_t __copy_size = __fortify_size; \ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \ __q_size, __p_size_field, \ __q_size_field, FORTIFY_FUNC_ ##op), \ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ __fortify_size, \ "field \"" #p "\" at " FILE_LINE, \ __p_size_field); \ /* Hide only the run-time size from value range tracking to */ \ /* silence compile-time false positive bounds warnings. */ \ if (!__builtin_constant_p(__copy_size)) \ OPTIMIZER_HIDE_VAR(__copy_size); \ __underlying_##op(p, q, __copy_size); \ }) /* * Notes about compile-time buffer size detection: * * With these types... * * struct middle { * u16 a; * u8 middle_buf[16]; * int b; * }; * struct end { * u16 a; * u8 end_buf[16]; * }; * struct flex { * int a; * u8 flex_buf[]; * }; * * void func(TYPE *ptr) { ... } * * Cases where destination size cannot be currently detected: * - the size of ptr's object (seemingly by design, gcc & clang fail): * __builtin_object_size(ptr, 1) == SIZE_MAX * - the size of flexible arrays in ptr's obj (by design, dynamic size): * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX * - the size of ANY array at the end of ptr's obj (gcc and clang bug): * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836 * * Cases where destination size is currently detected: * - the size of non-array members within ptr's object: * __builtin_object_size(ptr->a, 1) == 2 * - the size of non-flexible-array in the middle of ptr's obj: * __builtin_object_size(ptr->middle_buf, 1) == 16 * */ /* * __struct_size() vs __member_size() must be captured here to avoid * evaluating argument side-effects further into the macro layers. 
*/ #define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \ __struct_size(p), __struct_size(q), \ __member_size(p), __member_size(q), \ memcpy) #define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \ __struct_size(p), __struct_size(q), \ __member_size(p), __member_size(q), \ memmove) extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); __FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size) { const size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(FORTIFY_FUNC_memscan, FORTIFY_READ, p_size, size, NULL); return __real_memscan(p, c, size); } __FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3) int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size) { const size_t p_size = __struct_size(p); const size_t q_size = __struct_size(q); if (__builtin_constant_p(size)) { if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (__compiletime_lessthan(q_size, size)) __read_overflow2(); } if (p_size < size) fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, p_size, size, INT_MIN); else if (q_size < size) fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, q_size, size, INT_MIN); return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3) void *memchr(const void * const POS0 p, int c, __kernel_size_t size) { const size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(FORTIFY_FUNC_memchr, FORTIFY_READ, p_size, size, NULL); return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) { const size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(FORTIFY_FUNC_memchr_inv, FORTIFY_READ, p_size, size, NULL); return __real_memchr_inv(p, c, size); } extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof) __realloc_size(2); __FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp) { const size_t p_size = __struct_size(p); if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, __real_kmemdup(p, 0, gfp)); return __real_kmemdup(p, size, gfp); } #define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__)) /** * strcpy - Copy a string into another string buffer * * @p: pointer to destination of copy * @q: pointer to NUL-terminated source string to copy * * Do not use this function. While FORTIFY_SOURCE tries to avoid * overflows, this is only possible when the sizes of @q and @p are * known to the compiler. Prefer strscpy(), though note its different * return values for detecting truncation. * * Returns @p. * */ /* Defined after fortified strlen to reuse it. */ __FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2) char *strcpy(char * const POS p, const char * const POS q) { const size_t p_size = __member_size(p); const size_t q_size = __member_size(q); size_t size; /* If neither buffer size is known, immediately give up. */ if (__builtin_constant_p(p_size) && __builtin_constant_p(q_size) && p_size == SIZE_MAX && q_size == SIZE_MAX) return __underlying_strcpy(p, q); size = strlen(q) + 1; /* Compile-time check for const size overflow. 
*/ if (__compiletime_lessthan(p_size, size)) __write_overflow(); /* Run-time check for dynamic size overflow. */ if (p_size < size) fortify_panic(FORTIFY_FUNC_strcpy, FORTIFY_WRITE, p_size, size, p); __underlying_memcpy(p, q, size); return p; } /* Don't use these outside the FORTIFY_SOURCE implementation */ #undef __underlying_memchr #undef __underlying_memcmp #undef __underlying_strcat #undef __underlying_strcpy #undef __underlying_strlen #undef __underlying_strncat #undef __underlying_strncpy #undef POS #undef POS0 #endif /* _LINUX_FORTIFY_STRING_H_ */
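/*
 * Editor's sketch (not part of this header): a minimal illustration of the
 * field-spanning-write rule described in the comments above. The struct and
 * function below are hypothetical. A constant-size memcpy() larger than the
 * destination member trips __write_overflow_field() at compile time; copying
 * across adjacent members is instead done through a struct_group() mirror
 * (from <linux/stddef.h>), which gives the compiler a member whose size
 * covers the whole span.
 */
struct fortify_example_hdr {
	u16 proto;
	struct_group(addrs,
		u8 src[6];
		u8 dst[6];
	);
	u32 len;
};

static inline void fortify_example_set_addrs(struct fortify_example_hdr *hdr,
					     const void *both)
{
	/* memcpy(hdr->src, both, 12) would warn at build time: 12 > sizeof(hdr->src). */
	memcpy(&hdr->addrs, both, sizeof(hdr->addrs));	/* spans src and dst safely */
}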
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_HIGHMEM_H #define _LINUX_HIGHMEM_H #include <linux/fs.h> #include <linux/kernel.h> #include <linux/bug.h> #include <linux/cacheflush.h> #include <linux/kmsan.h> #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/hardirq.h> #include "highmem-internal.h" /** * kmap - Map a page for long term usage * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * Can only be invoked from preemptible task context because on 32bit * systems with CONFIG_HIGHMEM enabled this function might sleep. * * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area * this returns the virtual address of the direct kernel mapping. 
* * The returned virtual address is globally visible and valid up to the * point where it is unmapped via kunmap(). The pointer can be handed to * other contexts. * * For highmem pages on 32bit systems this can be slow as the mapping space * is limited and protected by a global lock. In case that there is no * mapping slot available the function blocks until a slot is released via * kunmap(). */ static inline void *kmap(struct page *page); /** * kunmap - Unmap the virtual address mapped by kmap() * @page: Pointer to the page which was mapped by kmap() * * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of * pages in the low memory area. */ static inline void kunmap(struct page *page); /** * kmap_to_page - Get the page for a kmap'ed address * @addr: The address to look up * * Returns: The page which is mapped to @addr. */ static inline struct page *kmap_to_page(void *addr); /** * kmap_flush_unused - Flush all unused kmap mappings in order to * remove stray mappings */ static inline void kmap_flush_unused(void); /** * kmap_local_page - Map a page for temporary usage * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * Can be invoked from any context, including interrupts. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation: * * addr1 = kmap_local_page(page1); * addr2 = kmap_local_page(page2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While kmap_local_page() is significantly faster than kmap() for the highmem * case it comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_page() can rely on this side effect. */ static inline void *kmap_local_page(struct page *page); /** * kmap_local_folio - Map a page in this folio for temporary usage * @folio: The folio containing the page. * @offset: The byte offset within the folio which identifies the page. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation:: * * addr1 = kmap_local_folio(folio1, offset1); * addr2 = kmap_local_folio(folio2, offset2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While it is significantly faster than kmap() for the highmem case it * comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_folio() can rely on this side effect. 
* * Context: Can be invoked from any context. * Return: The virtual address of @offset. */ static inline void *kmap_local_folio(struct folio *folio, size_t offset); /** * kmap_atomic - Atomically map a page for temporary usage - Deprecated! * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * In fact a wrapper around kmap_local_page() which also disables pagefaults * and, depending on PREEMPT_RT configuration, also CPU migration and * preemption. Therefore users should not count on the latter two side effects. * * Mappings should always be released by kunmap_atomic(). * * Do not use in new code. Use kmap_local_page() instead. * * It is used in atomic context when code wants to access the contents of a * page that might be allocated from high memory (see __GFP_HIGHMEM), for * example a page in the pagecache. The API has two functions, and they * can be used in a manner similar to the following:: * * // Find the page of interest. * struct page *page = find_get_page(mapping, offset); * * // Gain access to the contents of that page. * void *vaddr = kmap_atomic(page); * * // Do something to the contents of that page. * memset(vaddr, 0, PAGE_SIZE); * * // Unmap that page. * kunmap_atomic(vaddr); * * Note that the kunmap_atomic() call takes the result of the kmap_atomic() * call, not the argument. * * If you need to map two pages because you want to copy from one page to * another you need to keep the kmap_atomic calls strictly nested, like: * * vaddr1 = kmap_atomic(page1); * vaddr2 = kmap_atomic(page2); * * memcpy(vaddr1, vaddr2, PAGE_SIZE); * * kunmap_atomic(vaddr2); * kunmap_atomic(vaddr1); */ static inline void *kmap_atomic(struct page *page); /* Highmem related interfaces for management code */ static inline unsigned long nr_free_highpages(void); static inline unsigned long totalhigh_pages(void); #ifndef ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } #endif #ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE static inline void flush_kernel_vmap_range(void *vaddr, int size) { } static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { } #endif /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_local_page(page); clear_user_page(addr, vaddr, page); kunmap_local(addr); } #endif #ifndef vma_alloc_zeroed_movable_folio /** * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA. * @vma: The VMA the page is to be allocated for. * @vaddr: The virtual address the page will be inserted into. * * This function will allocate a page suitable for inserting into this * VMA at this virtual address. It may be allocated from highmem or * the movable zone. An architecture may provide its own implementation. * * Return: A folio containing one allocated and zeroed page or NULL if * we are out of memory. 
*/ static inline struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr) { struct folio *folio; folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); if (folio && user_alloc_needs_zeroing()) clear_user_highpage(&folio->page, vaddr); return folio; } #endif static inline void clear_highpage(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kaddr); kunmap_local(kaddr); } static inline void clear_highpage_kasan_tagged(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kasan_reset_tag(kaddr)); kunmap_local(kaddr); } #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE static inline void tag_clear_highpage(struct page *page) { } #endif /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. */ #ifdef CONFIG_HIGHMEM void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); #else static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { void *kaddr = kmap_local_page(page); unsigned int i; BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); kunmap_local(kaddr); for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } #endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) { zero_user_segments(page, start, end, 0, 0); } #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_user_page(vto, vfrom, vaddr, to); kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_page(vto, vfrom); kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifdef copy_mc_to_kernel /* * If architecture supports machine check exception handling, define the * #MC versions of copy_user_highpage and copy_highpage. They copy a memory * page with #MC in source page (@from) handled, and return the number * of bytes not copied if there was a #MC, otherwise 0 for success. 
*/ static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } static inline int copy_mc_highpage(struct page *to, struct page *from) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } #else static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, size_t len) { char *dst = kmap_local_page(dst_page); char *src = kmap_local_page(src_page); VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); memcpy(dst + dst_off, src + src_off, len); kunmap_local(src); kunmap_local(dst); } static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off, struct folio *src_folio, size_t src_off, size_t len) { VM_BUG_ON(dst_off + len > folio_size(dst_folio)); VM_BUG_ON(src_off + len > folio_size(src_folio)); do { char *dst = kmap_local_folio(dst_folio, dst_off); const char *src = kmap_local_folio(src_folio, src_off); size_t chunk = len; if (folio_test_highmem(dst_folio) && chunk > PAGE_SIZE - offset_in_page(dst_off)) chunk = PAGE_SIZE - offset_in_page(dst_off); if (folio_test_highmem(src_folio) && chunk > PAGE_SIZE - offset_in_page(src_off)) chunk = PAGE_SIZE - offset_in_page(src_off); memcpy(dst, src, chunk); kunmap_local(src); kunmap_local(dst); dst_off += chunk; src_off += chunk; len -= chunk; } while (len > 0); } static inline void memset_page(struct page *page, size_t offset, int val, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, val, len); kunmap_local(addr); } static inline void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) { char *from = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to, from + offset, len); kunmap_local(from); } static inline void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) { char *to = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, 0, len); flush_dcache_page(page); kunmap_local(addr); } /** * memcpy_from_folio - Copy a range of bytes from a folio. * @to: The memory to copy to. * @folio: The folio to read from. * @offset: The first byte in the folio to read. * @len: The number of bytes to copy. 
*/ static inline void memcpy_from_folio(char *to, struct folio *folio, size_t offset, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { const char *from = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(from); to += chunk; offset += chunk; len -= chunk; } while (len > 0); } /** * memcpy_to_folio - Copy a range of bytes to a folio. * @folio: The folio to write to. * @offset: The first byte in the folio to store to. * @from: The memory to copy from. * @len: The number of bytes to copy. */ static inline void memcpy_to_folio(struct folio *folio, size_t offset, const char *from, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { char *to = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_partial_kmap(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(to); from += chunk; offset += chunk; len -= chunk; } while (len > 0); flush_dcache_folio(folio); } /** * folio_zero_tail - Zero the tail of a folio. * @folio: The folio to zero. * @offset: The byte offset in the folio to start zeroing at. * @kaddr: The address the folio is currently mapped to. * * If you have already used kmap_local_folio() to map a folio, written * some data to it and now need to zero the end of the folio (and flush * the dcache), you can use this function. If you do not have the * folio kmapped (eg the folio has been partially populated by DMA), * use folio_zero_range() or folio_zero_segment() instead. * * Return: An address which can be passed to kunmap_local(). */ static inline __must_check void *folio_zero_tail(struct folio *folio, size_t offset, void *kaddr) { size_t len = folio_size(folio) - offset; if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memset(kaddr, 0, max); kunmap_local(kaddr); len -= max; offset += max; max = PAGE_SIZE; kaddr = kmap_local_folio(folio, offset); } } memset(kaddr, 0, len); flush_dcache_folio(folio); return kaddr; } /** * folio_fill_tail - Copy some data to a folio and pad with zeroes. * @folio: The destination folio. * @offset: The offset into @folio at which to start copying. * @from: The data to copy. * @len: How many bytes of data to copy. * * This function is most useful for filesystems which support inline data. * When they want to copy data from the inode into the page cache, this * function does everything for them. It supports large folios even on * HIGHMEM configurations. */ static inline void folio_fill_tail(struct folio *folio, size_t offset, const char *from, size_t len) { char *to = kmap_local_folio(folio, offset); VM_BUG_ON(offset + len > folio_size(folio)); if (folio_test_partial_kmap(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memcpy(to, from, max); kunmap_local(to); len -= max; from += max; offset += max; max = PAGE_SIZE; to = kmap_local_folio(folio, offset); } } memcpy(to, from, len); to = folio_zero_tail(folio, offset + len, to + len); kunmap_local(to); } /** * memcpy_from_file_folio - Copy some bytes from a file folio. * @to: The destination buffer. * @folio: The folio to copy from. * @pos: The position in the file. * @len: The maximum number of bytes to copy. * * Copy up to @len bytes from this folio. 
This may be limited by PAGE_SIZE * if the folio comes from HIGHMEM, and by the size of the folio. * * Return: The number of bytes copied from the folio. */ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, loff_t pos, size_t len) { size_t offset = offset_in_folio(folio, pos); char *from = kmap_local_folio(folio, offset); if (folio_test_partial_kmap(folio)) { offset = offset_in_page(offset); len = min_t(size_t, len, PAGE_SIZE - offset); } else len = min(len, folio_size(folio) - offset); memcpy(to, from, len); kunmap_local(from); return len; } /** * folio_zero_segments() - Zero two byte ranges in a folio. * @folio: The folio to write to. * @start1: The first byte to zero. * @xend1: One more than the last byte in the first range. * @start2: The first byte to zero in the second range. * @xend2: One more than the last byte in the second range. */ static inline void folio_zero_segments(struct folio *folio, size_t start1, size_t xend1, size_t start2, size_t xend2) { zero_user_segments(&folio->page, start1, xend1, start2, xend2); } /** * folio_zero_segment() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @xend: One more than the last byte to zero. */ static inline void folio_zero_segment(struct folio *folio, size_t start, size_t xend) { zero_user_segments(&folio->page, start, xend, 0, 0); } /** * folio_zero_range() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @length: The number of bytes to zero. */ static inline void folio_zero_range(struct folio *folio, size_t start, size_t length) { zero_user_segments(&folio->page, start, start + length, 0, 0); } /** * folio_release_kmap - Unmap a folio and drop a refcount. * @folio: The folio to release. * @addr: The address previously returned by a call to kmap_local_folio(). * * It is common, eg in directory handling to kmap a folio. This function * unmaps the folio and drops the refcount that was being held to keep the * folio alive while we accessed it. */ static inline void folio_release_kmap(struct folio *folio, void *addr) { kunmap_local(addr); folio_put(folio); } #endif /* _LINUX_HIGHMEM_H */ |
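The kmap_atomic() kernel-doc above deprecates that interface in favour of kmap_local_page(). As a hedged illustration (not part of the header), the zeroing example from that comment translates to the local-mapping API as follows; zero_page_old() and zero_page_new() are made-up names used only for comparison:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Deprecated pattern, shown only for comparison: kmap_atomic() also
 * disables pagefaults (and, unless PREEMPT_RT, migration/preemption)
 * as a side effect callers should not rely on. */
static void zero_page_old(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}

/* Preferred replacement: kmap_local_page() gives a CPU-local, nestable
 * mapping without the implicit pagefault/preemption side effects. */
static void zero_page_new(struct page *page)
{
	void *vaddr = kmap_local_page(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_local(vaddr);
}
```

Note that, as with kunmap_atomic(), kunmap_local() takes the address returned by the mapping call, not the page.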
| 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 | /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich */ #ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ #define _NET_BATMAN_ADV_HARD_INTERFACE_H_ #include "main.h" #include <linux/compiler.h> #include <linux/kref.h> #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/stddef.h> #include <linux/types.h> /** * enum batadv_hard_if_state - State of a hard interface */ enum batadv_hard_if_state { /** * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a * batman-adv mesh interface */ BATADV_IF_NOT_IN_USE, /** * @BATADV_IF_TO_BE_REMOVED: interface will be removed from mesh * interface */ BATADV_IF_TO_BE_REMOVED, /** @BATADV_IF_INACTIVE: interface is deactivated */ BATADV_IF_INACTIVE, /** @BATADV_IF_ACTIVE: interface is used */ BATADV_IF_ACTIVE, /** @BATADV_IF_TO_BE_ACTIVATED: interface is getting activated */ BATADV_IF_TO_BE_ACTIVATED, }; /** * enum batadv_hard_if_bcast - broadcast avoidance options */ enum batadv_hard_if_bcast { /** @BATADV_HARDIF_BCAST_OK: Do broadcast on according hard interface */ BATADV_HARDIF_BCAST_OK = 0, /** * @BATADV_HARDIF_BCAST_NORECIPIENT: Broadcast not needed, there is no * recipient */ BATADV_HARDIF_BCAST_NORECIPIENT, /** * @BATADV_HARDIF_BCAST_DUPFWD: There is just the neighbor we got it * from */ BATADV_HARDIF_BCAST_DUPFWD, /** @BATADV_HARDIF_BCAST_DUPORIG: There is just the originator */ BATADV_HARDIF_BCAST_DUPORIG, }; extern struct notifier_block batadv_hard_if_notifier; struct net_device *batadv_get_real_netdev(struct net_device *net_device); bool batadv_is_cfg80211_hardif(struct batadv_hard_iface *hard_iface); bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface); struct batadv_hard_iface* batadv_hardif_get_by_netdev(const struct net_device *net_dev); int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, struct net_device *mesh_iface); void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface); int batadv_hardif_min_mtu(struct net_device *mesh_iface); void batadv_update_min_mtu(struct net_device *mesh_iface); void batadv_hardif_release(struct kref *ref); int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, u8 *orig_addr, u8 *orig_neigh); /** * batadv_hardif_put() - decrement the hard interface refcounter and possibly * release it * @hard_iface: the hard interface to free */ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface) { if (!hard_iface) return; kref_put(&hard_iface->refcount, batadv_hardif_release); } /** * batadv_primary_if_get_selected() - Get reference to primary interface * @bat_priv: the bat priv with all the mesh interface information * * Return: primary interface (with increased refcnt), otherwise NULL */ static inline struct batadv_hard_iface * batadv_primary_if_get_selected(struct batadv_priv *bat_priv) { struct batadv_hard_iface *hard_iface; rcu_read_lock(); hard_iface = rcu_dereference(bat_priv->primary_if); if (!hard_iface) goto out; if (!kref_get_unless_zero(&hard_iface->refcount)) hard_iface = NULL; out: rcu_read_unlock(); return hard_iface; } 
#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
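A hedged usage sketch (not part of the header above): batadv_primary_if_get_selected() hands back the primary interface with its refcount raised, so every successful call must be balanced by batadv_hardif_put(). example_use_primary_if() is a made-up name for illustration only.

```c
#include "hard-interface.h"

/* Sketch only: acquire the primary hard interface, use it, drop the ref. */
static void example_use_primary_if(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;	/* no primary interface selected right now */

	/* ... transmit via primary_if, inspect its net_device, etc. ... */

	batadv_hardif_put(primary_if);	/* pairs with the get above */
}
```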
// SPDX-License-Identifier: GPL-2.0

#include "two_state_shared_lock.h"

void __bch2_two_state_lock(two_state_lock_t *lock, int s)
{
	__wait_event(lock->wait, bch2_two_state_trylock(lock, s));
}
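A hedged sketch of how this slow path is reached. It assumes the fast-path wrappers bch2_two_state_lock() and bch2_two_state_unlock() are declared alongside bch2_two_state_trylock() in two_state_shared_lock.h; if those names differ, treat them as placeholders. __bch2_two_state_lock() above only runs when the trylock fails and the caller has to sleep on lock->wait.

```c
#include "two_state_shared_lock.h"

/* Sketch only: take one of the two lock states, do work, release it. */
static void example_locked_section(two_state_lock_t *lock, int state)
{
	bch2_two_state_lock(lock, state);	/* sleeps while the other state is held */

	/* ... work that must exclude holders of the opposite state ... */

	bch2_two_state_unlock(lock, state);
}
```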
| 14 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H #include <linux/alarmtimer.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/pid.h> #include <linux/posix-timers_types.h> #include <linux/rcuref.h> #include <linux/spinlock.h> #include <linux/timerqueue.h> struct kernel_siginfo; struct task_struct; struct sigqueue; struct k_itimer; static inline clockid_t make_process_cpuclock(const unsigned int pid, const clockid_t clock) { return ((~pid) << 3) | clock; } static inline clockid_t make_thread_cpuclock(const unsigned int tid, const clockid_t clock) { return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK); } static inline clockid_t fd_to_clockid(const int fd) { return make_process_cpuclock((unsigned int) fd, CLOCKFD); } static inline int clockid_to_fd(const clockid_t clk) { return ~(clk >> 3); } static inline bool clockid_aux_valid(clockid_t id) { return IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) && id >= CLOCK_AUX && id <= CLOCK_AUX_LAST; } #ifdef CONFIG_POSIX_TIMERS #include <linux/signal_types.h> /** * cpu_timer - Posix CPU timer representation for k_itimer * @node: timerqueue node to queue in the task/sig * @head: timerqueue head on which this timer is queued * @pid: Pointer to target task PID * @elist: List head for the expiry list * @firing: Timer is currently firing * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer * @handling: Pointer to the task which handles expiry */ struct cpu_timer { struct timerqueue_node node; struct timerqueue_head *head; struct pid *pid; struct list_head elist; bool firing; bool nanosleep; struct task_struct __rcu *handling; }; static inline bool cpu_timer_enqueue(struct timerqueue_head *head, struct cpu_timer *ctmr) { ctmr->head = head; return timerqueue_add(head, &ctmr->node); } static inline bool cpu_timer_queued(struct cpu_timer *ctmr) { return !!ctmr->head; } static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr) { if (cpu_timer_queued(ctmr)) { timerqueue_del(ctmr->head, &ctmr->node); ctmr->head = NULL; return true; } return false; } static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) { return ctmr->node.expires; } static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp) { ctmr->node.expires = exp; } static inline void posix_cputimers_init(struct posix_cputimers *pct) { memset(pct, 0, sizeof(*pct)); pct->bases[0].nextevt = U64_MAX; pct->bases[1].nextevt = U64_MAX; pct->bases[2].nextevt = U64_MAX; } void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit); static 
inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, u64 runtime) { pct->bases[CPUCLOCK_SCHED].nextevt = runtime; } void posixtimer_rearm_itimer(struct task_struct *p); bool posixtimer_init_sigqueue(struct sigqueue *q); void posixtimer_send_sigqueue(struct k_itimer *tmr); bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq); void posixtimer_free_timer(struct k_itimer *timer); long posixtimer_create_prctl(unsigned long ctrl); /* Init task static initializer */ #define INIT_CPU_TIMERBASE(b) { \ .nextevt = U64_MAX, \ } #define INIT_CPU_TIMERBASES(b) { \ INIT_CPU_TIMERBASE(b[0]), \ INIT_CPU_TIMERBASE(b[1]), \ INIT_CPU_TIMERBASE(b[2]), \ } #define INIT_CPU_TIMERS(s) \ .posix_cputimers = { \ .bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \ }, #else struct cpu_timer { }; #define INIT_CPU_TIMERS(s) static inline void posix_cputimers_init(struct posix_cputimers *pct) { } static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } static inline void posixtimer_rearm_itimer(struct task_struct *p) { } static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq) { return false; } static inline void posixtimer_free_timer(struct k_itimer *timer) { } static inline long posixtimer_create_prctl(unsigned long ctrl) { return -EINVAL; } #endif #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK void clear_posix_cputimers_work(struct task_struct *p); void posix_cputimers_init_work(void); #else static inline void clear_posix_cputimers_work(struct task_struct *p) { } static inline void posix_cputimers_init_work(void) { } #endif /** * struct k_itimer - POSIX.1b interval timer structure. * @list: List node for binding the timer to tsk::signal::posix_timers * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers * @t_hash: Entry in the posix timer hash table * @it_lock: Lock protecting the timer * @kclock: Pointer to the k_clock struct handling this timer * @it_clock: The posix timer clock id * @it_id: The posix timer id for identifying the timer * @it_status: The status of the timer * @it_sig_periodic: The periodic status at signal delivery * @it_overrun: The overrun counter for pending signals * @it_overrun_last: The overrun at the time of the last delivered signal * @it_signal_seq: Sequence count to control signal delivery * @it_sigqueue_seq: The sequence count at the point where the signal was queued * @it_sigev_notify: The notify word of sigevent struct for signal delivery * @it_interval: The interval for periodic timers * @it_signal: Pointer to the creators signal struct * @it_pid: The pid of the process/task targeted by the signal * @it_process: The task to wakeup on clock_nanosleep (CPU timers) * @rcuref: Reference count for life time management * @sigq: Embedded sigqueue * @it: Union representing the various posix timer type * internals. * @rcu: RCU head for freeing the timer. 
*/ struct k_itimer { /* 1st cacheline contains read-mostly fields */ struct hlist_node t_hash; struct hlist_node list; timer_t it_id; clockid_t it_clock; int it_sigev_notify; enum pid_type it_pid_type; struct signal_struct *it_signal; const struct k_clock *kclock; /* 2nd cacheline and above contain fields which are modified regularly */ spinlock_t it_lock; int it_status; bool it_sig_periodic; s64 it_overrun; s64 it_overrun_last; unsigned int it_signal_seq; unsigned int it_sigqueue_seq; ktime_t it_interval; struct hlist_node ignored_list; union { struct pid *it_pid; struct task_struct *it_process; }; struct sigqueue sigq; rcuref_t rcuref; union { struct { struct hrtimer timer; } real; struct cpu_timer cpu; struct { struct alarm alarmtimer; } alarm; } it; struct rcu_head rcu; } ____cacheline_aligned_in_smp; void run_posix_cpu_timers(void); void posix_cpu_timers_exit(struct task_struct *task); void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, u64 *newval, u64 *oldval); int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); #ifdef CONFIG_POSIX_TIMERS static inline void posixtimer_putref(struct k_itimer *tmr) { if (rcuref_put(&tmr->rcuref)) posixtimer_free_timer(tmr); } static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); WARN_ON_ONCE(!rcuref_get(&tmr->rcuref)); } static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); posixtimer_putref(tmr); } static inline bool posixtimer_valid(const struct k_itimer *timer) { unsigned long val = (unsigned long)timer->it_signal; return !(val & 0x1UL); } #else /* CONFIG_POSIX_TIMERS */ static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { } static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { } #endif /* !CONFIG_POSIX_TIMERS */ #endif |
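A hedged sketch (not part of the header) of the cpu_timer helpers defined earlier: arm a CPU timer by setting its expiry and queueing it on a timerqueue head, then disarm it again. The locking and clock reprogramming that the real posix-cpu-timers code performs around these calls is omitted; the example_* names are made up.

```c
#include <linux/posix-timers.h>
#include <linux/timerqueue.h>

/* Sketch only: queue a cpu_timer; cpu_timer_enqueue() returns true when
 * this timer became the new earliest entry on @head. */
static void example_arm_cpu_timer(struct timerqueue_head *head,
				  struct cpu_timer *ctmr, u64 expires)
{
	cpu_timer_setexpires(ctmr, expires);

	if (cpu_timer_enqueue(head, ctmr)) {
		/* ... earliest expiry changed: reprogram the tick here ... */
	}
}

/* Sketch only: cpu_timer_dequeue() is safe to call even if the timer was
 * never queued; it returns true only if the timer was actually removed. */
static void example_disarm_cpu_timer(struct cpu_timer *ctmr)
{
	if (cpu_timer_dequeue(ctmr))
		cpu_timer_setexpires(ctmr, 0);
}
```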
| 9 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_PKEYS_H #define _ASM_X86_PKEYS_H /* * If more than 16 keys are ever supported, a thorough audit * will be necessary to ensure that the types that store key * numbers and masks have sufficient capacity. */ #define arch_max_pkey() (cpu_feature_enabled(X86_FEATURE_OSPKE) ? 16 : 1) extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey, unsigned long init_val); static inline bool arch_pkeys_enabled(void) { return cpu_feature_enabled(X86_FEATURE_OSPKE); } /* * Try to dedicate one of the protection keys to be used as an * execute-only protection key. */ extern int __execute_only_pkey(struct mm_struct *mm); static inline int execute_only_pkey(struct mm_struct *mm) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return ARCH_DEFAULT_PKEY; return __execute_only_pkey(mm); } extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey); static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return 0; return __arch_override_mprotect_pkey(vma, prot, pkey); } #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3) #define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map) #define mm_set_pkey_allocated(mm, pkey) do { \ mm_pkey_allocation_map(mm) |= (1U << pkey); \ } while (0) #define mm_set_pkey_free(mm, pkey) do { \ mm_pkey_allocation_map(mm) &= ~(1U << pkey); \ } while (0) static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { /* * "Allocated" pkeys are those that have been returned * from pkey_alloc() or pkey 0 which is allocated * implicitly when the mm is created. */ if (pkey < 0) return false; if (pkey >= arch_max_pkey()) return false; /* * The exec-only pkey is set in the allocation map, but * is not available to any of the user interfaces like * mprotect_pkey(). */ if (pkey == mm->context.execute_only_pkey) return false; return mm_pkey_allocation_map(mm) & (1U << pkey); } /* * Returns a positive, 4-bit key on success, or -1 on failure. */ static inline int mm_pkey_alloc(struct mm_struct *mm) { /* * Note: this is the one and only place we make sure * that the pkey is valid as far as the hardware is * concerned. The rest of the kernel trusts that * only good, valid pkeys come out of here. */ u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1); int ret; /* * Are we out of pkeys? We must handle this specially * because ffz() behavior is undefined if there are no * zeros. */ if (mm_pkey_allocation_map(mm) == all_pkeys_mask) return -1; ret = ffz(mm_pkey_allocation_map(mm)); mm_set_pkey_allocated(mm, ret); return ret; } static inline int mm_pkey_free(struct mm_struct *mm, int pkey) { if (!mm_pkey_is_allocated(mm, pkey)) return -EINVAL; mm_set_pkey_free(mm, pkey); return 0; } static inline int vma_pkey(struct vm_area_struct *vma) { unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3; return (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT; } #endif /*_ASM_X86_PKEYS_H */ |
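A hedged sketch (not part of the header above) of the pkey allocation helpers: grab a free protection key for an mm and release it again. In the real kernel these run with mmap_lock held; locking and the arch_set_user_pkey_access() step are only hinted at in comments, and example_alloc_and_free_pkey() is a made-up name.

```c
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pkeys.h>

/* Sketch only: allocate one of the (up to 16) protection keys, then free it. */
static int example_alloc_and_free_pkey(struct mm_struct *mm)
{
	int pkey = mm_pkey_alloc(mm);

	if (pkey < 0)
		return -ENOSPC;	/* every key in the allocation map is taken */

	/* ... configure rights with arch_set_user_pkey_access(), tag VMAs ... */

	return mm_pkey_free(mm, pkey);	/* 0 on success, -EINVAL if not allocated */
}
```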
3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 | // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "bcachefs_ioctl.h" #include "bkey_buf.h" #include "btree_cache.h" #include "btree_update.h" #include "buckets.h" #include "darray.h" #include "dirent.h" #include "error.h" #include "fs.h" #include "fsck.h" #include "inode.h" #include "io_misc.h" #include "keylist.h" #include "namei.h" #include "recovery_passes.h" #include "snapshot.h" #include "super.h" #include "thread_with_file.h" #include "xattr.h" #include <linux/bsearch.h> #include <linux/dcache.h> /* struct qstr */ static int dirent_points_to_inode_nowarn(struct bch_fs *c, struct bkey_s_c_dirent d, struct bch_inode_unpacked *inode) { if (d.v->d_type == DT_SUBVOL ? 
le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol : le64_to_cpu(d.v->d_inum) == inode->bi_inum) return 0; return bch_err_throw(c, ENOENT_dirent_doesnt_match_inode); } static void dirent_inode_mismatch_msg(struct printbuf *out, struct bch_fs *c, struct bkey_s_c_dirent dirent, struct bch_inode_unpacked *inode) { prt_str(out, "inode points to dirent that does not point back:"); prt_newline(out); bch2_bkey_val_to_text(out, c, dirent.s_c); prt_newline(out); bch2_inode_unpacked_to_text(out, inode); } static int dirent_points_to_inode(struct bch_fs *c, struct bkey_s_c_dirent dirent, struct bch_inode_unpacked *inode) { int ret = dirent_points_to_inode_nowarn(c, dirent, inode); if (ret) { struct printbuf buf = PRINTBUF; dirent_inode_mismatch_msg(&buf, c, dirent, inode); bch_warn(c, "%s", buf.buf); printbuf_exit(&buf); } return ret; } /* * XXX: this is handling transaction restarts without returning * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore: */ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum, u32 snapshot) { u64 sectors = 0; int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ if (bkey_extent_is_allocation(k.k)) sectors += k.k->size; 0; })); return ret ?: sectors; } static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum, u32 snapshot) { u64 subdirs = 0; int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ if (k.k->type == KEY_TYPE_dirent && bkey_s_c_to_dirent(k).v->d_type == DT_DIR) subdirs++; 0; })); return ret ?: subdirs; } static int subvol_lookup(struct btree_trans *trans, u32 subvol, u32 *snapshot, u64 *inum) { struct bch_subvolume s; int ret = bch2_subvolume_get(trans, subvol, false, &s); *snapshot = le32_to_cpu(s.snapshot); *inum = le64_to_cpu(s.inode); return ret; } static int lookup_dirent_in_snapshot(struct btree_trans *trans, struct bch_hash_info hash_info, subvol_inum dir, struct qstr *name, u64 *target, unsigned *type, u32 snapshot) { struct btree_iter iter; struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc, &hash_info, dir, name, 0, snapshot); int ret = bkey_err(k); if (ret) return ret; struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); *target = le64_to_cpu(d.v->d_inum); *type = d.v->d_type; bch2_trans_iter_exit(trans, &iter); return 0; } /* * Find any subvolume associated with a tree of snapshots * We can't rely on master_subvol - it might have been deleted. 
*/ static int find_snapshot_tree_subvol(struct btree_trans *trans, u32 tree_id, u32 *subvol) { struct btree_iter iter; struct bkey_s_c k; int ret; for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, ret) { if (k.k->type != KEY_TYPE_snapshot) continue; struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); if (le32_to_cpu(s.v->tree) != tree_id) continue; if (s.v->subvol) { *subvol = le32_to_cpu(s.v->subvol); goto found; } } ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol); found: bch2_trans_iter_exit(trans, &iter); return ret; } /* Get lost+found, create if it doesn't exist: */ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, struct bch_inode_unpacked *lostfound, u64 reattaching_inum) { struct bch_fs *c = trans->c; struct qstr lostfound_str = QSTR("lost+found"); struct btree_iter lostfound_iter = {}; u64 inum = 0; unsigned d_type = 0; int ret; struct bch_snapshot_tree st; ret = bch2_snapshot_tree_lookup(trans, bch2_snapshot_tree(c, snapshot), &st); if (ret) return ret; u32 subvolid; ret = find_snapshot_tree_subvol(trans, bch2_snapshot_tree(c, snapshot), &subvolid); bch_err_msg(c, ret, "finding subvol associated with snapshot tree %u", bch2_snapshot_tree(c, snapshot)); if (ret) return ret; struct bch_subvolume subvol; ret = bch2_subvolume_get(trans, subvolid, false, &subvol); bch_err_msg(c, ret, "looking up subvol %u for snapshot %u", subvolid, snapshot); if (ret) return ret; if (!subvol.inode) { struct btree_iter iter; struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_subvolumes, POS(0, subvolid), 0, subvolume); ret = PTR_ERR_OR_ZERO(subvol); if (ret) return ret; subvol->v.inode = cpu_to_le64(reattaching_inum); bch2_trans_iter_exit(trans, &iter); } subvol_inum root_inum = { .subvol = subvolid, .inum = le64_to_cpu(subvol.inode) }; struct bch_inode_unpacked root_inode; struct bch_hash_info root_hash_info; ret = bch2_inode_find_by_inum_snapshot(trans, root_inum.inum, snapshot, &root_inode, 0); bch_err_msg(c, ret, "looking up root inode %llu for subvol %u", root_inum.inum, subvolid); if (ret) return ret; root_hash_info = bch2_hash_info_init(c, &root_inode); ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum, &lostfound_str, &inum, &d_type, snapshot); if (bch2_err_matches(ret, ENOENT)) goto create_lostfound; bch_err_fn(c, ret); if (ret) return ret; if (d_type != DT_DIR) { bch_err(c, "error looking up lost+found: not a directory"); return bch_err_throw(c, ENOENT_not_directory); } /* * The bch2_check_dirents pass has already run, dangling dirents * shouldn't exist here: */ ret = bch2_inode_find_by_inum_snapshot(trans, inum, snapshot, lostfound, 0); bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)", inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot)); return ret; create_lostfound: /* * we always create lost+found in the root snapshot; we don't want * different branches of the snapshot tree to have different lost+found */ snapshot = le32_to_cpu(st.root_snapshot); /* * XXX: we could have a nicer log message here if we had a nice way to * walk backpointers to print a path */ struct printbuf path = PRINTBUF; ret = bch2_inum_to_path(trans, root_inum, &path); if (ret) goto err; bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u", path.buf, root_inum.subvol, snapshot); printbuf_exit(&path); u64 now = bch2_current_time(c); u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, lostfound); bch2_inode_init_late(c, lostfound, now, 0, 0, 
S_IFDIR|0700, 0, &root_inode); lostfound->bi_dir = root_inode.bi_inum; lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot); root_inode.bi_nlink++; ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu); if (ret) goto err; bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot); ret = bch2_btree_iter_traverse(trans, &lostfound_iter); if (ret) goto err; ret = bch2_dirent_create_snapshot(trans, 0, root_inode.bi_inum, snapshot, &root_hash_info, mode_to_type(lostfound->bi_mode), &lostfound_str, lostfound->bi_inum, &lostfound->bi_dir_offset, BTREE_UPDATE_internal_snapshot_node| STR_HASH_must_create) ?: bch2_inode_write_flags(trans, &lostfound_iter, lostfound, BTREE_UPDATE_internal_snapshot_node); err: bch_err_msg(c, ret, "creating lost+found"); bch2_trans_iter_exit(trans, &lostfound_iter); return ret; } static inline bool inode_should_reattach(struct bch_inode_unpacked *inode) { if (inode->bi_inum == BCACHEFS_ROOT_INO && inode->bi_subvol == BCACHEFS_ROOT_SUBVOL) return false; /* * Subvolume roots are special: older versions of subvolume roots may be * disconnected, it's only the newest version that matters. * * We only keep a single dirent pointing to a subvolume root, i.e. * older versions of snapshots will not have a different dirent pointing * to the same subvolume root. * * This is because dirents that point to subvolumes are only visible in * the parent subvolume - versioning is not needed - and keeping them * around would break fsck, because when we're crossing subvolumes we * don't have a consistent snapshot ID to do check the inode <-> dirent * relationships. * * Thus, a subvolume root that's been renamed after a snapshot will have * a disconnected older version - that's expected. * * Note that taking a snapshot always updates the root inode (to update * the dirent backpointer), so a subvolume root inode with * BCH_INODE_has_child_snapshot is never visible. 
*/ if (inode->bi_subvol && (inode->bi_flags & BCH_INODE_has_child_snapshot)) return false; return !bch2_inode_has_backpointer(inode) && !(inode->bi_flags & BCH_INODE_unlinked); } static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot) { struct btree_iter iter; struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents, SPOS(d_pos.inode, d_pos.offset, snapshot), BTREE_ITER_intent| BTREE_ITER_with_updates); int ret = bkey_err(k); if (ret) return ret; if (bpos_eq(k.k->p, d_pos)) { /* * delet_at() doesn't work because the update path doesn't * internally use BTREE_ITER_with_updates yet */ struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); ret = PTR_ERR_OR_ZERO(k); if (ret) goto err; bkey_init(&k->k); k->k.type = KEY_TYPE_whiteout; k->k.p = iter.pos; ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node); } err: bch2_trans_iter_exit(trans, &iter); return ret; } static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode) { struct bch_fs *c = trans->c; struct bch_inode_unpacked lostfound; char name_buf[20]; int ret; u32 dirent_snapshot = inode->bi_snapshot; if (inode->bi_subvol) { inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL; struct btree_iter subvol_iter; struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &subvol_iter, BTREE_ID_subvolumes, POS(0, inode->bi_subvol), 0, subvolume); ret = PTR_ERR_OR_ZERO(subvol); if (ret) return ret; subvol->v.fs_path_parent = BCACHEFS_ROOT_SUBVOL; bch2_trans_iter_exit(trans, &subvol_iter); u64 root_inum; ret = subvol_lookup(trans, inode->bi_parent_subvol, &dirent_snapshot, &root_inum); if (ret) return ret; snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol); } else { snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum); } ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum); if (ret) return ret; bch_verbose(c, "got lostfound inum %llu", lostfound.bi_inum); lostfound.bi_nlink += S_ISDIR(inode->bi_mode); /* ensure lost+found inode is also present in inode snapshot */ if (!inode->bi_subvol) { BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot)); lostfound.bi_snapshot = inode->bi_snapshot; } ret = __bch2_fsck_write_inode(trans, &lostfound); if (ret) return ret; struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound); struct qstr name = QSTR(name_buf); inode->bi_dir = lostfound.bi_inum; ret = bch2_dirent_create_snapshot(trans, inode->bi_parent_subvol, lostfound.bi_inum, dirent_snapshot, &dir_hash, inode_d_type(inode), &name, inode->bi_subvol ?: inode->bi_inum, &inode->bi_dir_offset, BTREE_UPDATE_internal_snapshot_node| STR_HASH_must_create); if (ret) { bch_err_msg(c, ret, "error creating dirent"); return ret; } ret = __bch2_fsck_write_inode(trans, inode); if (ret) return ret; { CLASS(printbuf, buf)(); ret = bch2_inum_snapshot_to_path(trans, inode->bi_inum, inode->bi_snapshot, NULL, &buf); if (ret) return ret; bch_info(c, "reattached at %s", buf.buf); } /* * Fix up inodes in child snapshots: if they should also be reattached * update the backpointer field, if they should not be we need to emit * whiteouts for the dirent we just created. 
*/ if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) { snapshot_id_list whiteouts_done; struct btree_iter iter; struct bkey_s_c k; darray_init(&whiteouts_done); for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1), BTREE_ITER_all_snapshots|BTREE_ITER_intent, k, ret) { if (k.k->p.offset != inode->bi_inum) break; if (!bkey_is_inode(k.k) || !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) || snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot)) continue; struct bch_inode_unpacked child_inode; ret = bch2_inode_unpack(k, &child_inode); if (ret) break; if (!inode_should_reattach(&child_inode)) { ret = maybe_delete_dirent(trans, SPOS(lostfound.bi_inum, inode->bi_dir_offset, dirent_snapshot), k.k->p.snapshot); if (ret) break; ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot); if (ret) break; } else { iter.snapshot = k.k->p.snapshot; child_inode.bi_dir = inode->bi_dir; child_inode.bi_dir_offset = inode->bi_dir_offset; ret = bch2_inode_write_flags(trans, &iter, &child_inode, BTREE_UPDATE_internal_snapshot_node); if (ret) break; } } darray_exit(&whiteouts_done); bch2_trans_iter_exit(trans, &iter); } return ret; } static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans, struct btree_iter *iter, struct bpos pos) { return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent); } static int remove_backpointer(struct btree_trans *trans, struct bch_inode_unpacked *inode) { if (!bch2_inode_has_backpointer(inode)) return 0; u32 snapshot = inode->bi_snapshot; if (inode->bi_parent_subvol) { int ret = bch2_subvolume_get_snapshot(trans, inode->bi_parent_subvol, &snapshot); if (ret) return ret; } struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c_dirent d = dirent_get_by_pos(trans, &iter, SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot)); int ret = bkey_err(d) ?: dirent_points_to_inode(c, d, inode) ?: bch2_fsck_remove_dirent(trans, d.k->p); bch2_trans_iter_exit(trans, &iter); return ret; } static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s) { struct bch_fs *c = trans->c; struct bch_inode_unpacked inode; int ret = bch2_inode_find_by_inum_trans(trans, (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, &inode); if (ret) return ret; ret = remove_backpointer(trans, &inode); if (!bch2_err_matches(ret, ENOENT)) bch_err_msg(c, ret, "removing dirent"); if (ret) return ret; ret = reattach_inode(trans, &inode); bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum); return ret; } static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum) { struct bch_fs *c = trans->c; if (!bch2_snapshot_is_leaf(c, snapshotid)) { bch_err(c, "need to reconstruct subvol, but have interior node snapshot"); return bch_err_throw(c, fsck_repair_unimplemented); } /* * If inum isn't set, that means we're being called from check_dirents, * not check_inodes - the root of this subvolume doesn't exist or we * would have found it there: */ if (!inum) { struct btree_iter inode_iter = {}; struct bch_inode_unpacked new_inode; u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, &new_inode); bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL); new_inode.bi_subvol = subvolid; int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?: bch2_btree_iter_traverse(trans, &inode_iter) ?: bch2_inode_write(trans, &inode_iter, &new_inode); 
bch2_trans_iter_exit(trans, &inode_iter); if (ret) return ret; inum = new_inode.bi_inum; } bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum); struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol)); int ret = PTR_ERR_OR_ZERO(new_subvol); if (ret) return ret; bkey_subvolume_init(&new_subvol->k_i); new_subvol->k.p.offset = subvolid; new_subvol->v.snapshot = cpu_to_le32(snapshotid); new_subvol->v.inode = cpu_to_le64(inum); ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0); if (ret) return ret; struct btree_iter iter; struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshots, POS(0, snapshotid), 0, snapshot); ret = PTR_ERR_OR_ZERO(s); bch_err_msg(c, ret, "getting snapshot %u", snapshotid); if (ret) return ret; u32 snapshot_tree = le32_to_cpu(s->v.tree); s->v.subvol = cpu_to_le32(subvolid); SET_BCH_SNAPSHOT_SUBVOL(&s->v, true); bch2_trans_iter_exit(trans, &iter); struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshot_trees, POS(0, snapshot_tree), 0, snapshot_tree); ret = PTR_ERR_OR_ZERO(st); bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree); if (ret) return ret; if (!st->v.master_subvol) st->v.master_subvol = cpu_to_le32(subvolid); bch2_trans_iter_exit(trans, &iter); return 0; } static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 snapshot, u64 inum) { struct bch_fs *c = trans->c; unsigned i_mode = S_IFREG; u64 i_size = 0; switch (btree) { case BTREE_ID_extents: { struct btree_iter iter = {}; bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0)); bch2_trans_iter_exit(trans, &iter); int ret = bkey_err(k); if (ret) return ret; i_size = k.k->p.offset << 9; break; } case BTREE_ID_dirents: i_mode = S_IFDIR; break; case BTREE_ID_xattrs: break; default: BUG(); } struct bch_inode_unpacked new_inode; bch2_inode_init_early(c, &new_inode); bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL); new_inode.bi_size = i_size; new_inode.bi_inum = inum; new_inode.bi_snapshot = snapshot; return __bch2_fsck_write_inode(trans, &new_inode); } static inline void snapshots_seen_exit(struct snapshots_seen *s) { darray_exit(&s->ids); } static inline void snapshots_seen_init(struct snapshots_seen *s) { memset(s, 0, sizeof(*s)); } static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id) { u32 *i; __darray_for_each(s->ids, i) { if (*i == id) return 0; if (*i > id) break; } int ret = darray_insert_item(&s->ids, i - s->ids.data, id); if (ret) bch_err(c, "error reallocating snapshots_seen table (size %zu)", s->ids.size); return ret; } static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s, enum btree_id btree_id, struct bpos pos) { if (!bkey_eq(s->pos, pos)) s->ids.nr = 0; s->pos = pos; return snapshot_list_add_nodup(c, &s->ids, pos.snapshot); } /** * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor, * and @ancestor hasn't been overwritten in @seen * * @c: filesystem handle * @seen: list of snapshot ids already seen at current position * @id: descendent snapshot id * @ancestor: ancestor snapshot id * * Returns: whether key in @ancestor snapshot is visible in @id snapshot */ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen, u32 id, u32 ancestor) { EBUG_ON(id > ancestor); if (id == ancestor) 
return true; if (!bch2_snapshot_is_ancestor(c, id, ancestor)) return false; /* * We know that @id is a descendant of @ancestor, we're checking if * we've seen a key that overwrote @ancestor - i.e. also a descendent of * @ascestor and with @id as a descendent. * * But we already know that we're scanning IDs between @id and @ancestor * numerically, since snapshot ID lists are kept sorted, so if we find * an id that's an ancestor of @id we're done: */ darray_for_each_reverse(seen->ids, i) if (*i != ancestor && bch2_snapshot_is_ancestor(c, id, *i)) return false; return true; } /** * ref_visible - given a key with snapshot id @src that points to a key with * snapshot id @dst, test whether there is some snapshot in which @dst is * visible. * * @c: filesystem handle * @s: list of snapshot IDs already seen at @src * @src: snapshot ID of src key * @dst: snapshot ID of dst key * Returns: true if there is some snapshot in which @dst is visible * * Assumes we're visiting @src keys in natural key order */ static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s, u32 src, u32 dst) { return dst <= src ? key_visible_in_snapshot(c, s, dst, src) : bch2_snapshot_is_ancestor(c, src, dst); } static int ref_visible2(struct bch_fs *c, u32 src, struct snapshots_seen *src_seen, u32 dst, struct snapshots_seen *dst_seen) { if (dst > src) { swap(dst, src); swap(dst_seen, src_seen); } return key_visible_in_snapshot(c, src_seen, dst, src); } #define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \ for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \ (_i)->inode.bi_snapshot <= (_snapshot); _i++) \ if (key_visible_in_snapshot(_c, _s, _i->inode.bi_snapshot, _snapshot)) struct inode_walker_entry { struct bch_inode_unpacked inode; bool whiteout; u64 count; u64 i_size; }; struct inode_walker { bool first_this_inode; bool have_inodes; bool recalculate_sums; struct bpos last_pos; DARRAY(struct inode_walker_entry) inodes; snapshot_id_list deletes; }; static void inode_walker_exit(struct inode_walker *w) { darray_exit(&w->inodes); darray_exit(&w->deletes); } static struct inode_walker inode_walker_init(void) { return (struct inode_walker) { 0, }; } static int add_inode(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c inode) { int ret = darray_push(&w->inodes, ((struct inode_walker_entry) { .whiteout = !bkey_is_inode(inode.k), })); if (ret) return ret; struct inode_walker_entry *n = &darray_last(w->inodes); if (!n->whiteout) { return bch2_inode_unpack(inode, &n->inode); } else { n->inode.bi_inum = inode.k->p.offset; n->inode.bi_snapshot = inode.k->p.snapshot; return 0; } } static int get_inodes_all_snapshots(struct btree_trans *trans, struct inode_walker *w, u64 inum) { struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c k; int ret; /* * We no longer have inodes for w->last_pos; clear this to avoid * screwing up check_i_sectors/check_subdir_count if we take a * transaction restart here: */ w->have_inodes = false; w->recalculate_sums = false; w->inodes.nr = 0; for_each_btree_key_max_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum), SPOS(0, inum, U32_MAX), BTREE_ITER_all_snapshots, k, ret) { ret = add_inode(c, w, k); if (ret) break; } bch2_trans_iter_exit(trans, &iter); if (ret) return ret; w->first_this_inode = true; w->have_inodes = true; return 0; } static int get_visible_inodes(struct btree_trans *trans, struct inode_walker *w, struct snapshots_seen *s, u64 inum) { struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c k; int ret; w->inodes.nr = 0; 
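/* collect every version of this inode that is visible from s->pos.snapshot; versions that have been deleted (whiteouts) are recorded in w->deletes: */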
w->deletes.nr = 0; for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), BTREE_ITER_all_snapshots, k, ret) { if (k.k->p.offset != inum) break; if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) continue; if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) continue; ret = bkey_is_inode(k.k) ? add_inode(c, w, k) : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); if (ret) break; } bch2_trans_iter_exit(trans, &iter); return ret; } static struct inode_walker_entry * lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, struct bkey_s_c k) { struct bch_fs *c = trans->c; struct inode_walker_entry *i = darray_find_p(w->inodes, i, bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->inode.bi_snapshot)); if (!i) return NULL; struct printbuf buf = PRINTBUF; int ret = 0; if (fsck_err_on(k.k->p.snapshot != i->inode.bi_snapshot, trans, snapshot_key_missing_inode_snapshot, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n" "unexpected because we should always update the inode when we update a key in that inode\n" "%s", w->last_pos.inode, k.k->p.snapshot, i->inode.bi_snapshot, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { if (!i->whiteout) { struct bch_inode_unpacked new = i->inode; new.bi_snapshot = k.k->p.snapshot; ret = __bch2_fsck_write_inode(trans, &new); } else { struct bkey_i whiteout; bkey_init(&whiteout.k); whiteout.k.type = KEY_TYPE_whiteout; whiteout.k.p = SPOS(0, i->inode.bi_inum, k.k->p.snapshot); ret = bch2_btree_insert_nonextent(trans, BTREE_ID_inodes, &whiteout, BTREE_UPDATE_internal_snapshot_node); } if (ret) goto fsck_err; ret = bch2_trans_commit(trans, NULL, NULL, 0); if (ret) goto fsck_err; struct inode_walker_entry new_entry = *i; new_entry.inode.bi_snapshot = k.k->p.snapshot; new_entry.count = 0; new_entry.i_size = 0; while (i > w->inodes.data && i[-1].inode.bi_snapshot > k.k->p.snapshot) --i; size_t pos = i - w->inodes.data; ret = darray_insert_item(&w->inodes, pos, new_entry); if (ret) goto fsck_err; ret = bch_err_throw(c, transaction_restart_nested); goto fsck_err; } printbuf_exit(&buf); return i; fsck_err: printbuf_exit(&buf); return ERR_PTR(ret); } static struct inode_walker_entry *walk_inode(struct btree_trans *trans, struct inode_walker *w, struct bkey_s_c k) { if (w->last_pos.inode != k.k->p.inode) { int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode); if (ret) return ERR_PTR(ret); } w->last_pos = k.k->p; return lookup_inode_for_snapshot(trans, w, k); } /* * Prefer to delete the first one, since that will be the one at the wrong * offset: * return value: 0 -> delete k1, 1 -> delete k2 */ int bch2_fsck_update_backpointers(struct btree_trans *trans, struct snapshots_seen *s, const struct bch_hash_desc desc, struct bch_hash_info *hash_info, struct bkey_i *new) { if (new->k.type != KEY_TYPE_dirent) return 0; struct bkey_i_dirent *d = bkey_i_to_dirent(new); struct inode_walker target = inode_walker_init(); int ret = 0; if (d->v.d_type == DT_SUBVOL) { bch_err(trans->c, "%s does not support DT_SUBVOL", __func__); ret = -BCH_ERR_fsck_repair_unimplemented; } else { ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum)); if (ret) goto err; darray_for_each(target.inodes, i) { i->inode.bi_dir_offset = d->k.p.offset; ret = __bch2_fsck_write_inode(trans, &i->inode); if (ret) goto err; } } err: inode_walker_exit(&target); return ret; } static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, struct btree_iter *iter, struct bch_inode_unpacked 
*inode, u32 *snapshot) { if (inode->bi_subvol) { u64 inum; int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum); if (ret) return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) }); } return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot)); } static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p) { struct btree_iter iter; struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0); int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set; bch2_trans_iter_exit(trans, &iter); return ret; } static int check_inode_dirent_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode, bool *write_inode) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; u32 inode_snapshot = inode->bi_snapshot; struct btree_iter dirent_iter = {}; struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot); int ret = bkey_err(d); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; if ((ret || dirent_points_to_inode_nowarn(c, d, inode)) && inode->bi_subvol && (inode->bi_flags & BCH_INODE_has_child_snapshot)) { /* Older version of a renamed subvolume root: we won't have a * correct dirent for it. That's expected, see * inode_should_reattach(). * * We don't clear the backpointer field when doing the rename * because there might be arbitrarily many versions in older * snapshots. */ inode->bi_dir = 0; inode->bi_dir_offset = 0; *write_inode = true; goto out; } if (fsck_err_on(ret, trans, inode_points_to_missing_dirent, "inode points to missing dirent\n%s", (bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) || fsck_err_on(!ret && dirent_points_to_inode_nowarn(c, d, inode), trans, inode_points_to_wrong_dirent, "%s", (printbuf_reset(&buf), dirent_inode_mismatch_msg(&buf, c, d, inode), buf.buf))) { /* * We just clear the backpointer fields for now. If we find a * dirent that points to this inode in check_dirents(), we'll * update it then; then when we get to check_path() if the * backpointer is still 0 we'll reattach it. 
*/ inode->bi_dir = 0; inode->bi_dir_offset = 0; *write_inode = true; } out: ret = 0; fsck_err: bch2_trans_iter_exit(trans, &dirent_iter); printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } static int check_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct bch_inode_unpacked *snapshot_root, struct snapshots_seen *s) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; struct bch_inode_unpacked u; bool do_update = false; int ret; ret = bch2_check_key_has_snapshot(trans, iter, k); if (ret < 0) goto err; if (ret) return 0; ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); if (ret) goto err; if (!bkey_is_inode(k.k)) return 0; ret = bch2_inode_unpack(k, &u); if (ret) goto err; if (snapshot_root->bi_inum != u.bi_inum) { ret = bch2_inode_find_snapshot_root(trans, u.bi_inum, snapshot_root); if (ret) goto err; } if (u.bi_hash_seed != snapshot_root->bi_hash_seed || INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root)) { ret = bch2_repair_inode_hash_info(trans, snapshot_root); BUG_ON(ret == -BCH_ERR_fsck_repair_unimplemented); if (ret) goto err; } ret = bch2_check_inode_has_case_insensitive(trans, &u, &s->ids, &do_update); if (ret) goto err; if (bch2_inode_has_backpointer(&u)) { ret = check_inode_dirent_inode(trans, &u, &do_update); if (ret) goto err; } if (fsck_err_on(bch2_inode_has_backpointer(&u) && (u.bi_flags & BCH_INODE_unlinked), trans, inode_unlinked_but_has_dirent, "inode unlinked but has dirent\n%s", (printbuf_reset(&buf), bch2_inode_unpacked_to_text(&buf, &u), buf.buf))) { u.bi_flags &= ~BCH_INODE_unlinked; do_update = true; } if (S_ISDIR(u.bi_mode) && (u.bi_flags & BCH_INODE_unlinked)) { /* Check for this early so that check_unreachable_inode() will reattach it */ ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot); if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty) goto err; fsck_err_on(ret, trans, inode_dir_unlinked_but_not_empty, "dir unlinked but not empty\n%s", (printbuf_reset(&buf), bch2_inode_unpacked_to_text(&buf, &u), buf.buf)); u.bi_flags &= ~BCH_INODE_unlinked; do_update = true; ret = 0; } if (fsck_err_on(S_ISDIR(u.bi_mode) && u.bi_size, trans, inode_dir_has_nonzero_i_size, "directory %llu:%u with nonzero i_size %lli", u.bi_inum, u.bi_snapshot, u.bi_size)) { u.bi_size = 0; do_update = true; } ret = bch2_inode_has_child_snapshots(trans, k.k->p); if (ret < 0) goto err; if (fsck_err_on(ret != !!(u.bi_flags & BCH_INODE_has_child_snapshot), trans, inode_has_child_snapshots_wrong, "inode has_child_snapshots flag wrong (should be %u)\n%s", ret, (printbuf_reset(&buf), bch2_inode_unpacked_to_text(&buf, &u), buf.buf))) { if (ret) u.bi_flags |= BCH_INODE_has_child_snapshot; else u.bi_flags &= ~BCH_INODE_has_child_snapshot; do_update = true; } ret = 0; if ((u.bi_flags & BCH_INODE_unlinked) && !(u.bi_flags & BCH_INODE_has_child_snapshot)) { if (!test_bit(BCH_FS_started, &c->flags)) { /* * If we're not in online fsck, don't delete unlinked * inodes, just make sure they're on the deleted list. * * They might be referred to by a logged operation - * i.e. we might have crashed in the middle of a * truncate on an unlinked but open file - so we want to * let the delete_dead_inodes kill it after resuming * logged ops. 
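* In online fsck (filesystem already started), the else branch below instead checks whether the inode (or a descendant) is still open, and removes it right away if it isn't.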
*/ ret = check_inode_deleted_list(trans, k.k->p); if (ret < 0) goto err_noprint; fsck_err_on(!ret, trans, unlinked_inode_not_on_deleted_list, "inode %llu:%u unlinked, but not on deleted list", u.bi_inum, k.k->p.snapshot); ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1); if (ret) goto err; } else { ret = bch2_inode_or_descendents_is_open(trans, k.k->p); if (ret < 0) goto err; if (fsck_err_on(!ret, trans, inode_unlinked_and_not_open, "inode %llu:%u unlinked and not open", u.bi_inum, u.bi_snapshot)) { ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); bch_err_msg(c, ret, "in fsck deleting inode"); goto err_noprint; } ret = 0; } } if (fsck_err_on(u.bi_parent_subvol && (u.bi_subvol == 0 || u.bi_subvol == BCACHEFS_ROOT_SUBVOL), trans, inode_bi_parent_nonzero, "inode %llu:%u has subvol %u but nonzero parent subvol %u", u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) { u.bi_parent_subvol = 0; do_update = true; } if (u.bi_subvol) { struct bch_subvolume s; ret = bch2_subvolume_get(trans, u.bi_subvol, false, &s); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum); goto do_update; } if (fsck_err_on(ret, trans, inode_bi_subvol_missing, "inode %llu:%u bi_subvol points to missing subvolume %u", u.bi_inum, k.k->p.snapshot, u.bi_subvol) || fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum || !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot), k.k->p.snapshot), trans, inode_bi_subvol_wrong, "inode %llu:%u points to subvol %u, but subvol points to %llu:%u", u.bi_inum, k.k->p.snapshot, u.bi_subvol, le64_to_cpu(s.inode), le32_to_cpu(s.snapshot))) { u.bi_subvol = 0; u.bi_parent_subvol = 0; do_update = true; } } if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal), trans, inode_journal_seq_in_future, "inode journal seq in future (currently at %llu)\n%s", journal_cur_seq(&c->journal), (printbuf_reset(&buf), bch2_inode_unpacked_to_text(&buf, &u), buf.buf))) { u.bi_journal_seq = journal_cur_seq(&c->journal); do_update = true; } do_update: if (do_update) { ret = __bch2_fsck_write_inode(trans, &u); bch_err_msg(c, ret, "in fsck updating inode"); if (ret) goto err_noprint; } err: fsck_err: bch_err_fn(c, ret); err_noprint: printbuf_exit(&buf); return ret; } int bch2_check_inodes(struct bch_fs *c) { struct bch_inode_unpacked snapshot_root = {}; struct snapshots_seen s; snapshots_seen_init(&s); int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_inode(trans, &iter, k, &snapshot_root, &s))); snapshots_seen_exit(&s); bch_err_fn(c, ret); return ret; } static int find_oldest_inode_needs_reattach(struct btree_trans *trans, struct bch_inode_unpacked *inode) { struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_s_c k; int ret = 0; /* * We look for inodes to reattach in natural key order, leaves first, * but we should do the reattach at the oldest version that needs to be * reattached: */ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot + 1), BTREE_ITER_all_snapshots, k, ret) { if (k.k->p.offset != inode->bi_inum) break; if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot)) continue; if (!bkey_is_inode(k.k)) break; struct bch_inode_unpacked parent_inode; ret = bch2_inode_unpack(k, &parent_inode); if 
(ret) break; if (!inode_should_reattach(&parent_inode)) break; *inode = parent_inode; } bch2_trans_iter_exit(trans, &iter); return ret; } static int check_unreachable_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct printbuf buf = PRINTBUF; int ret = 0; if (!bkey_is_inode(k.k)) return 0; struct bch_inode_unpacked inode; ret = bch2_inode_unpack(k, &inode); if (ret) return ret; if (!inode_should_reattach(&inode)) return 0; ret = find_oldest_inode_needs_reattach(trans, &inode); if (ret) return ret; if (fsck_err(trans, inode_unreachable, "unreachable inode:\n%s", (bch2_inode_unpacked_to_text(&buf, &inode), buf.buf))) ret = reattach_inode(trans, &inode); fsck_err: printbuf_exit(&buf); return ret; } /* * Reattach unreachable (but not unlinked) inodes * * Run after check_inodes() and check_dirents(), so we know that inode * backpointer fields point to valid dirents, and every inode that has a dirent * that points to it has its backpointer field set - so we're just looking for * non-unlinked inodes without backpointers: * * XXX: this is racy w.r.t. hardlink removal in online fsck */ int bch2_check_unreachable_inodes(struct bch_fs *c) { int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_unreachable_inode(trans, &iter, k))); bch_err_fn(c, ret); return ret; } static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode) { switch (btree) { case BTREE_ID_extents: return S_ISREG(mode) || S_ISLNK(mode); case BTREE_ID_dirents: return S_ISDIR(mode); case BTREE_ID_xattrs: return true; default: BUG(); } } static int check_key_has_inode(struct btree_trans *trans, struct btree_iter *iter, struct inode_walker *inode, struct inode_walker_entry *i, struct bkey_s_c k) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; struct btree_iter iter2 = {}; int ret = PTR_ERR_OR_ZERO(i); if (ret) return ret; if (k.k->type == KEY_TYPE_whiteout) goto out; bool have_inode = i && !i->whiteout; if (!have_inode && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) goto reconstruct; if (have_inode && btree_matches_i_mode(iter->btree_id, i->inode.bi_mode)) goto out; prt_printf(&buf, ", "); bool have_old_inode = false; darray_for_each(inode->inodes, i2) if (!i2->whiteout && bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i2->inode.bi_snapshot) && btree_matches_i_mode(iter->btree_id, i2->inode.bi_mode)) { prt_printf(&buf, "but found good inode in older snapshot\n"); bch2_inode_unpacked_to_text(&buf, &i2->inode); prt_newline(&buf); have_old_inode = true; break; } struct bkey_s_c k2; unsigned nr_keys = 0; prt_printf(&buf, "found keys:\n"); for_each_btree_key_max_norestart(trans, iter2, iter->btree_id, SPOS(k.k->p.inode, 0, k.k->p.snapshot), POS(k.k->p.inode, U64_MAX), 0, k2, ret) { nr_keys++; if (nr_keys <= 10) { bch2_bkey_val_to_text(&buf, c, k2); prt_newline(&buf); } if (nr_keys >= 100) break; } if (ret) goto err; if (nr_keys > 100) prt_printf(&buf, "found > %u keys for this missing inode\n", nr_keys); else if (nr_keys > 10) prt_printf(&buf, "found %u keys for this missing inode\n", nr_keys); if (!have_inode) { if (fsck_err_on(!have_inode, trans, key_in_missing_inode, "key in missing inode%s", buf.buf)) { /* * Maybe a deletion that raced with data move, or something * weird like that? But if we know the inode was deleted, or * it's just a few keys, we can safely delete them.
* * If it's many keys, we should probably recreate the inode */ if (have_old_inode || nr_keys <= 2) goto delete; else goto reconstruct; } } else { /* * not autofix, this one would be a giant wtf - bit error in the * inode corrupting i_mode? * * may want to try repairing inode instead of deleting */ if (fsck_err_on(!btree_matches_i_mode(iter->btree_id, i->inode.bi_mode), trans, key_in_wrong_inode_type, "key for wrong inode mode %o%s", i->inode.bi_mode, buf.buf)) goto delete; } out: err: fsck_err: bch2_trans_iter_exit(trans, &iter2); printbuf_exit(&buf); bch_err_fn(c, ret); return ret; delete: /* * XXX: print out more info * count up extents for this inode, check if we have different inode in * an older snapshot version, perhaps decide if we want to reconstitute */ ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node); goto out; reconstruct: ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); if (ret) goto err; inode->last_pos.inode--; ret = bch_err_throw(c, transaction_restart_nested); goto out; } static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w) { struct bch_fs *c = trans->c; int ret = 0; s64 count2; darray_for_each(w->inodes, i) { if (i->inode.bi_sectors == i->count) continue; count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->inode.bi_snapshot); if (w->recalculate_sums) i->count = count2; if (i->count != count2) { bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", w->last_pos.inode, i->inode.bi_snapshot, i->count, count2); i->count = count2; } if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), trans, inode_i_sectors_wrong, "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", w->last_pos.inode, i->inode.bi_snapshot, i->inode.bi_sectors, i->count)) { i->inode.bi_sectors = i->count; ret = bch2_fsck_write_inode(trans, &i->inode); if (ret) break; } } fsck_err: bch_err_fn(c, ret); return ret; } static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w) { u32 restart_count = trans->restart_count; return check_i_sectors_notnested(trans, w) ?: trans_was_restarted(trans, restart_count); } struct extent_end { u32 snapshot; u64 offset; struct snapshots_seen seen; }; struct extent_ends { struct bpos last_pos; DARRAY(struct extent_end) e; }; static void extent_ends_reset(struct extent_ends *extent_ends) { darray_for_each(extent_ends->e, i) snapshots_seen_exit(&i->seen); extent_ends->e.nr = 0; } static void extent_ends_exit(struct extent_ends *extent_ends) { extent_ends_reset(extent_ends); darray_exit(&extent_ends->e); } static void extent_ends_init(struct extent_ends *extent_ends) { memset(extent_ends, 0, sizeof(*extent_ends)); } static int extent_ends_at(struct bch_fs *c, struct extent_ends *extent_ends, struct snapshots_seen *seen, struct bkey_s_c k) { struct extent_end *i, n = (struct extent_end) { .offset = k.k->p.offset, .snapshot = k.k->p.snapshot, .seen = *seen, }; n.seen.ids.data = kmemdup(seen->ids.data, sizeof(seen->ids.data[0]) * seen->ids.size, GFP_KERNEL); if (!n.seen.ids.data) return bch_err_throw(c, ENOMEM_fsck_extent_ends_at); __darray_for_each(extent_ends->e, i) { if (i->snapshot == k.k->p.snapshot) { snapshots_seen_exit(&i->seen); *i = n; return 0; } if (i->snapshot >= k.k->p.snapshot) break; } return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n); } static int overlapping_extents_found(struct btree_trans 
*trans, enum btree_id btree, struct bpos pos1, struct snapshots_seen *pos1_seen, struct bkey pos2, bool *fixed, struct extent_end *extent_end) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; struct btree_iter iter1, iter2 = {}; struct bkey_s_c k1, k2; int ret; BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2))); bch2_trans_iter_init(trans, &iter1, btree, pos1, BTREE_ITER_all_snapshots| BTREE_ITER_not_extents); k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX)); ret = bkey_err(k1); if (ret) goto err; prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, k1); if (!bpos_eq(pos1, k1.k->p)) { prt_str(&buf, "\nwanted\n "); bch2_bpos_to_text(&buf, pos1); prt_str(&buf, "\n"); bch2_bkey_to_text(&buf, &pos2); bch_err(c, "%s: error finding first overlapping extent when repairing, got%s", __func__, buf.buf); ret = bch_err_throw(c, internal_fsck_err); goto err; } bch2_trans_copy_iter(trans, &iter2, &iter1); while (1) { bch2_btree_iter_advance(trans, &iter2); k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX)); ret = bkey_err(k2); if (ret) goto err; if (bpos_ge(k2.k->p, pos2.p)) break; } prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, k2); if (bpos_gt(k2.k->p, pos2.p) || pos2.size != k2.k->size) { bch_err(c, "%s: error finding second overlapping extent when repairing%s", __func__, buf.buf); ret = bch_err_throw(c, internal_fsck_err); goto err; } prt_printf(&buf, "\noverwriting %s extent", pos1.snapshot >= pos2.p.snapshot ? "first" : "second"); if (fsck_err(trans, extent_overlapping, "overlapping extents%s", buf.buf)) { struct btree_iter *old_iter = &iter1; struct disk_reservation res = { 0 }; if (pos1.snapshot < pos2.p.snapshot) { old_iter = &iter2; swap(k1, k2); } trans->extra_disk_res += bch2_bkey_sectors_compressed(k2); ret = bch2_trans_update_extent_overwrite(trans, old_iter, BTREE_UPDATE_internal_snapshot_node, k1, k2) ?: bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc); bch2_disk_reservation_put(c, &res); bch_info(c, "repair ret %s", bch2_err_str(ret)); if (ret) goto err; *fixed = true; if (pos1.snapshot == pos2.p.snapshot) { /* * We overwrote the first extent, and did the overwrite * in the same snapshot: */ extent_end->offset = bkey_start_offset(&pos2); } else if (pos1.snapshot > pos2.p.snapshot) { /* * We overwrote the first extent in pos2's snapshot: */ ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot); } else { /* * We overwrote the second extent - restart * check_extent() from the top: */ ret = bch_err_throw(c, transaction_restart_nested); } } fsck_err: err: bch2_trans_iter_exit(trans, &iter2); bch2_trans_iter_exit(trans, &iter1); printbuf_exit(&buf); return ret; } static int check_overlapping_extents(struct btree_trans *trans, struct snapshots_seen *seen, struct extent_ends *extent_ends, struct bkey_s_c k, struct btree_iter *iter, bool *fixed) { struct bch_fs *c = trans->c; int ret = 0; /* transaction restart, running again */ if (bpos_eq(extent_ends->last_pos, k.k->p)) return 0; if (extent_ends->last_pos.inode != k.k->p.inode) extent_ends_reset(extent_ends); darray_for_each(extent_ends->e, i) { if (i->offset <= bkey_start_offset(k.k)) continue; if (!ref_visible2(c, k.k->p.snapshot, seen, i->snapshot, &i->seen)) continue; ret = overlapping_extents_found(trans, iter->btree_id, SPOS(iter->pos.inode, i->offset, i->snapshot), &i->seen, *k.k, fixed, i); if (ret) goto err; } extent_ends->last_pos = k.k->p; err: return ret; } static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter, struct
bkey_s_c k) { struct bch_fs *c = trans->c; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); struct bch_extent_crc_unpacked crc; const union bch_extent_entry *i; unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9; bkey_for_each_crc(k.k, ptrs, crc, i) if (crc_is_encoded(crc) && crc.uncompressed_size > encoded_extent_max_sectors) { struct printbuf buf = PRINTBUF; bch2_bkey_val_to_text(&buf, c, k); bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf); printbuf_exit(&buf); } return 0; } static int check_extent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct inode_walker *inode, struct snapshots_seen *s, struct extent_ends *extent_ends, struct disk_reservation *res) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; int ret = 0; ret = bch2_check_key_has_snapshot(trans, iter, k); if (ret) { ret = ret < 0 ? ret : 0; goto out; } if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) { ret = check_i_sectors(trans, inode); if (ret) goto err; } ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); if (ret) goto err; struct inode_walker_entry *extent_i = walk_inode(trans, inode, k); ret = PTR_ERR_OR_ZERO(extent_i); if (ret) goto err; ret = check_key_has_inode(trans, iter, inode, extent_i, k); if (ret) goto err; if (k.k->type != KEY_TYPE_whiteout) { ret = check_overlapping_extents(trans, s, extent_ends, k, iter, &inode->recalculate_sums); if (ret) goto err; /* * Check inodes in reverse order, from oldest snapshots to * newest, starting from the inode that matches this extent's * snapshot. If we didn't have one, iterate over all inodes: */ for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); inode->inodes.data && i >= inode->inodes.data; --i) { if (i->inode.bi_snapshot > k.k->p.snapshot || !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot)) continue; u64 last_block = round_up(i->inode.bi_size, block_bytes(c)) >> 9; if (fsck_err_on(k.k->p.offset > last_block && !bkey_extent_is_reservation(k), trans, extent_past_end_of_inode, "extent type past end of inode %llu:%u, i_size %llu\n%s", i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { ret = snapshots_seen_add_inorder(c, s, i->inode.bi_snapshot) ?: bch2_fpunch_snapshot(trans, SPOS(i->inode.bi_inum, last_block, i->inode.bi_snapshot), POS(i->inode.bi_inum, U64_MAX)); if (ret) goto err; iter->k.type = KEY_TYPE_whiteout; break; } } } ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc); if (ret) goto err; if (bkey_extent_is_allocation(k.k)) { for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes); inode->inodes.data && i >= inode->inodes.data; --i) { if (i->whiteout || i->inode.bi_snapshot > k.k->p.snapshot || !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot)) continue; i->count += k.k->size; } } if (k.k->type != KEY_TYPE_whiteout) { ret = extent_ends_at(c, extent_ends, s, k); if (ret) goto err; } out: err: fsck_err: printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } /* * Walk extents: verify that extents have a corresponding S_ISREG inode, and * that i_size an i_sectors are consistent */ int bch2_check_extents(struct bch_fs *c) { struct inode_walker w = inode_walker_init(); struct snapshots_seen s; struct extent_ends extent_ends; struct disk_reservation res = { 0 }; snapshots_seen_init(&s); extent_ends_init(&extent_ends); int ret = bch2_trans_run(c, for_each_btree_key(trans, iter, BTREE_ID_extents, 
POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ bch2_disk_reservation_put(c, &res); check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?: check_extent_overbig(trans, &iter, k); })) ?: check_i_sectors_notnested(trans, &w)); bch2_disk_reservation_put(c, &res); extent_ends_exit(&extent_ends); inode_walker_exit(&w); snapshots_seen_exit(&s); bch_err_fn(c, ret); return ret; } int bch2_check_indirect_extents(struct bch_fs *c) { struct disk_reservation res = { 0 }; int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_reflink, POS_MIN, BTREE_ITER_prefetch, k, &res, NULL, BCH_TRANS_COMMIT_no_enospc, ({ bch2_disk_reservation_put(c, &res); check_extent_overbig(trans, &iter, k); }))); bch2_disk_reservation_put(c, &res); bch_err_fn(c, ret); return ret; } static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w) { struct bch_fs *c = trans->c; int ret = 0; s64 count2; darray_for_each(w->inodes, i) { if (i->inode.bi_nlink == i->count) continue; count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->inode.bi_snapshot); if (count2 < 0) return count2; if (i->count != count2) { bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu", w->last_pos.inode, i->inode.bi_snapshot, i->count, count2); i->count = count2; if (i->inode.bi_nlink == i->count) continue; } if (i->inode.bi_nlink != i->count) { CLASS(printbuf, buf)(); lockrestart_do(trans, bch2_inum_snapshot_to_path(trans, w->last_pos.inode, i->inode.bi_snapshot, NULL, &buf)); if (fsck_err_on(i->inode.bi_nlink != i->count, trans, inode_dir_wrong_nlink, "directory with wrong i_nlink: got %u, should be %llu\n%s", i->inode.bi_nlink, i->count, buf.buf)) { i->inode.bi_nlink = i->count; ret = bch2_fsck_write_inode(trans, &i->inode); if (ret) break; } } } fsck_err: bch_err_fn(c, ret); return ret; } static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w) { u32 restart_count = trans->restart_count; return check_subdir_count_notnested(trans, w) ?: trans_was_restarted(trans, restart_count); } /* find a subvolume that's a descendent of @snapshot: */ static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid) { struct btree_iter iter; struct bkey_s_c k; int ret; for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) { if (k.k->type != KEY_TYPE_subvolume) continue; struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) { bch2_trans_iter_exit(trans, &iter); *subvolid = k.k->p.offset; goto found; } } if (!ret) ret = -ENOENT; found: bch2_trans_iter_exit(trans, &iter); return ret; } noinline_for_stack static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c_dirent d) { struct bch_fs *c = trans->c; struct btree_iter subvol_iter = {}; struct bch_inode_unpacked subvol_root; u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol); u32 target_subvol = le32_to_cpu(d.v->d_child_subvol); u32 parent_snapshot; u32 new_parent_subvol = 0; u64 parent_inum; struct printbuf buf = PRINTBUF; int ret = 0; ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; if (ret || (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) { int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol); if (ret2 && !bch2_err_matches(ret, ENOENT)) return ret2; 
} if (ret && !new_parent_subvol && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) { /* * Couldn't find a subvol for dirent's snapshot - but we lost * subvols, so we need to reconstruct: */ ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0); if (ret) return ret; parent_snapshot = d.k->p.snapshot; } if (fsck_err_on(ret, trans, dirent_to_missing_parent_subvol, "dirent parent_subvol points to missing subvolume\n%s", (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) || fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot), trans, dirent_not_visible_in_parent_subvol, "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s", parent_snapshot, (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) { if (!new_parent_subvol) { bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot); return bch_err_throw(c, fsck_repair_unimplemented); } struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent); ret = PTR_ERR_OR_ZERO(new_dirent); if (ret) goto err; new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol); } struct bkey_s_c_subvolume s = bch2_bkey_get_iter_typed(trans, &subvol_iter, BTREE_ID_subvolumes, POS(0, target_subvol), 0, subvolume); ret = bkey_err(s.s_c); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; if (ret) { if (fsck_err(trans, dirent_to_missing_subvol, "dirent points to missing subvolume\n%s", (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) return bch2_fsck_remove_dirent(trans, d.k->p); ret = 0; goto out; } if (le32_to_cpu(s.v->fs_path_parent) != parent_subvol) { printbuf_reset(&buf); prt_printf(&buf, "subvol with wrong fs_path_parent, should be %u\n", parent_subvol); ret = bch2_inum_to_path(trans, (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, &buf); if (ret) goto err; prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, s.s_c); if (fsck_err(trans, subvol_fs_path_parent_wrong, "%s", buf.buf)) { struct bkey_i_subvolume *n = bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume); ret = PTR_ERR_OR_ZERO(n); if (ret) goto err; n->v.fs_path_parent = cpu_to_le32(parent_subvol); } } u64 target_inum = le64_to_cpu(s.v->inode); u32 target_snapshot = le32_to_cpu(s.v->snapshot); ret = bch2_inode_find_by_inum_snapshot(trans, target_inum, target_snapshot, &subvol_root, 0); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; if (ret) { bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum); ret = bch_err_throw(c, fsck_repair_unimplemented); goto err; } if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol, trans, inode_bi_parent_wrong, "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u", target_inum, subvol_root.bi_parent_subvol, parent_subvol)) { subvol_root.bi_parent_subvol = parent_subvol; subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot); ret = __bch2_fsck_write_inode(trans, &subvol_root); if (ret) goto err; } ret = bch2_check_dirent_target(trans, iter, d, &subvol_root, true); if (ret) goto err; out: err: fsck_err: bch2_trans_iter_exit(trans, &subvol_iter); printbuf_exit(&buf); return ret; } static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct bch_hash_info *hash_info, struct inode_walker *dir, struct inode_walker *target, struct snapshots_seen *s, bool *need_second_pass) { struct bch_fs *c = trans->c; struct inode_walker_entry *i; struct printbuf buf = PRINTBUF; int ret = 0; ret = bch2_check_key_has_snapshot(trans, iter, k); if
(ret) { ret = ret < 0 ? ret : 0; goto out; } ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p); if (ret) goto err; if (k.k->type == KEY_TYPE_whiteout) goto out; if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) { ret = check_subdir_dirents_count(trans, dir); if (ret) goto err; } i = walk_inode(trans, dir, k); ret = PTR_ERR_OR_ZERO(i); if (ret < 0) goto err; ret = check_key_has_inode(trans, iter, dir, i, k); if (ret) goto err; if (!i || i->whiteout) goto out; if (dir->first_this_inode) *hash_info = bch2_hash_info_init(c, &i->inode); dir->first_this_inode = false; hash_info->cf_encoding = bch2_inode_casefold(c, &i->inode) ? c->cf_encoding : NULL; ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k, need_second_pass); if (ret < 0) goto err; if (ret) { /* dirent has been deleted */ ret = 0; goto out; } if (k.k->type != KEY_TYPE_dirent) goto out; struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); /* check casefold */ if (fsck_err_on(d.v->d_casefold != !!hash_info->cf_encoding, trans, dirent_casefold_mismatch, "dirent casefold does not match dir casefold\n%s", (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { subvol_inum dir_inum = { .subvol = d.v->d_type == DT_SUBVOL ? le32_to_cpu(d.v->d_parent_subvol) : 0, }; u64 target = d.v->d_type == DT_SUBVOL ? le32_to_cpu(d.v->d_child_subvol) : le64_to_cpu(d.v->d_inum); struct qstr name = bch2_dirent_get_name(d); struct bkey_i_dirent *new_d = bch2_dirent_create_key(trans, hash_info, dir_inum, d.v->d_type, &name, NULL, target); ret = PTR_ERR_OR_ZERO(new_d); if (ret) goto out; new_d->k.p.inode = d.k->p.inode; new_d->k.p.snapshot = d.k->p.snapshot; struct btree_iter dup_iter = {}; ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc, hash_info, iter, BTREE_UPDATE_internal_snapshot_node) ?: bch2_str_hash_repair_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, bkey_i_to_s_c(&new_d->k_i), &dup_iter, bkey_s_c_null, need_second_pass); goto out; } if (d.v->d_type == DT_SUBVOL) { ret = check_dirent_to_subvol(trans, iter, d); if (ret) goto err; } else { ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum)); if (ret) goto err; if (fsck_err_on(!target->inodes.nr, trans, dirent_to_missing_inode, "dirent points to missing inode:\n%s", (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { ret = bch2_fsck_remove_dirent(trans, d.k->p); if (ret) goto err; } darray_for_each(target->inodes, i) { ret = bch2_check_dirent_target(trans, iter, d, &i->inode, true); if (ret) goto err; } darray_for_each(target->deletes, i) if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i), trans, dirent_to_overwritten_inode, "dirent points to inode overwritten in snapshot %u:\n%s", *i, (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { struct btree_iter delete_iter; bch2_trans_iter_init(trans, &delete_iter, BTREE_ID_dirents, SPOS(k.k->p.inode, k.k->p.offset, *i), BTREE_ITER_intent); ret = bch2_btree_iter_traverse(trans, &delete_iter) ?: bch2_hash_delete_at(trans, bch2_dirent_hash_desc, hash_info, &delete_iter, BTREE_UPDATE_internal_snapshot_node); bch2_trans_iter_exit(trans, &delete_iter); if (ret) goto err; } } ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); if (ret) goto err; for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) { if (d.v->d_type == DT_DIR) i->count++; i->i_size += bkey_bytes(d.k); } out: err: fsck_err: printbuf_exit(&buf); return ret; } /* * Walk dirents: verify that they all have a corresponding S_ISDIR inode, * validate d_type 
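* and check dirent hashes, casefolding and subvolume dirent targets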
*/ int bch2_check_dirents(struct bch_fs *c) { struct inode_walker dir = inode_walker_init(); struct inode_walker target = inode_walker_init(); struct snapshots_seen s; struct bch_hash_info hash_info; bool need_second_pass = false, did_second_pass = false; int ret; snapshots_seen_init(&s); again: ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_dirents, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s, &need_second_pass)) ?: check_subdir_count_notnested(trans, &dir)); if (!ret && need_second_pass && !did_second_pass) { bch_info(c, "check_dirents requires second pass"); swap(did_second_pass, need_second_pass); goto again; } if (!ret && need_second_pass) { bch_err(c, "dirents not repairing"); ret = -EINVAL; } snapshots_seen_exit(&s); inode_walker_exit(&dir); inode_walker_exit(&target); bch_err_fn(c, ret); return ret; } static int check_xattr(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct bch_hash_info *hash_info, struct inode_walker *inode) { struct bch_fs *c = trans->c; int ret = bch2_check_key_has_snapshot(trans, iter, k); if (ret < 0) return ret; if (ret) return 0; struct inode_walker_entry *i = walk_inode(trans, inode, k); ret = PTR_ERR_OR_ZERO(i); if (ret) return ret; ret = check_key_has_inode(trans, iter, inode, i, k); if (ret) return ret; if (!i || i->whiteout) return 0; if (inode->first_this_inode) *hash_info = bch2_hash_info_init(c, &i->inode); inode->first_this_inode = false; bool need_second_pass = false; return bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info, iter, k, &need_second_pass); } /* * Walk xattrs: verify that they all have a corresponding inode */ int bch2_check_xattrs(struct bch_fs *c) { struct inode_walker inode = inode_walker_init(); struct bch_hash_info hash_info; int ret = 0; ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, POS(BCACHEFS_ROOT_INO, 0), BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_xattr(trans, &iter, k, &hash_info, &inode))); inode_walker_exit(&inode); bch_err_fn(c, ret); return ret; } static int check_root_trans(struct btree_trans *trans) { struct bch_fs *c = trans->c; struct bch_inode_unpacked root_inode; u32 snapshot; u64 inum; int ret; ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; if (mustfix_fsck_err_on(ret, trans, root_subvol_missing, "root subvol missing")) { struct bkey_i_subvolume *root_subvol = bch2_trans_kmalloc(trans, sizeof(*root_subvol)); ret = PTR_ERR_OR_ZERO(root_subvol); if (ret) goto err; snapshot = U32_MAX; inum = BCACHEFS_ROOT_INO; bkey_subvolume_init(&root_subvol->k_i); root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL; root_subvol->v.flags = 0; root_subvol->v.snapshot = cpu_to_le32(snapshot); root_subvol->v.inode = cpu_to_le64(inum); ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0); bch_err_msg(c, ret, "writing root subvol"); if (ret) goto err; } ret = bch2_inode_find_by_inum_snapshot(trans, BCACHEFS_ROOT_INO, snapshot, &root_inode, 0); if (ret && !bch2_err_matches(ret, ENOENT)) return ret; if (mustfix_fsck_err_on(ret, trans, root_dir_missing, "root directory missing") || mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), trans, root_inode_not_dir, "root inode not a directory")) { bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL); 
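/* recreate the root directory inode in the root subvolume's snapshot and write it out: */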
root_inode.bi_inum = inum; root_inode.bi_snapshot = snapshot; ret = __bch2_fsck_write_inode(trans, &root_inode); bch_err_msg(c, ret, "writing root inode"); } err: fsck_err: return ret; } /* Get root directory, create if it doesn't exist: */ int bch2_check_root(struct bch_fs *c) { int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_root_trans(trans)); bch_err_fn(c, ret); return ret; } static bool darray_u32_has(darray_u32 *d, u32 v) { darray_for_each(*d, i) if (*i == v) return true; return false; } static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct bch_fs *c = trans->c; struct btree_iter parent_iter = {}; darray_u32 subvol_path = {}; struct printbuf buf = PRINTBUF; int ret = 0; if (k.k->type != KEY_TYPE_subvolume) return 0; subvol_inum start = { .subvol = k.k->p.offset, .inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode), }; while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) { ret = darray_push(&subvol_path, k.k->p.offset); if (ret) goto err; struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k); struct bch_inode_unpacked subvol_root; ret = bch2_inode_find_by_inum_trans(trans, (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) }, &subvol_root); if (ret) break; u32 parent = le32_to_cpu(s.v->fs_path_parent); if (darray_u32_has(&subvol_path, parent)) { printbuf_reset(&buf); prt_printf(&buf, "subvolume loop: "); ret = bch2_inum_to_path(trans, start, &buf); if (ret) goto err; if (fsck_err(trans, subvol_loop, "%s", buf.buf)) ret = reattach_subvol(trans, s); break; } bch2_trans_iter_exit(trans, &parent_iter); bch2_trans_iter_init(trans, &parent_iter, BTREE_ID_subvolumes, POS(0, parent), 0); k = bch2_btree_iter_peek_slot(trans, &parent_iter); ret = bkey_err(k); if (ret) goto err; if (fsck_err_on(k.k->type != KEY_TYPE_subvolume, trans, subvol_unreachable, "unreachable subvolume %s", (printbuf_reset(&buf), bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { ret = reattach_subvol(trans, s); break; } } fsck_err: err: printbuf_exit(&buf); darray_exit(&subvol_path); bch2_trans_iter_exit(trans, &parent_iter); return ret; } int bch2_check_subvolume_structure(struct bch_fs *c) { int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_subvol_path(trans, &iter, k))); bch_err_fn(c, ret); return ret; } static int bch2_bi_depth_renumber_one(struct btree_trans *trans, u64 inum, u32 snapshot, u32 new_depth) { struct btree_iter iter; struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, SPOS(0, inum, snapshot), 0); struct bch_inode_unpacked inode; int ret = bkey_err(k) ?: !bkey_is_inode(k.k) ? 
-BCH_ERR_ENOENT_inode : bch2_inode_unpack(k, &inode); if (ret) goto err; if (inode.bi_depth != new_depth) { inode.bi_depth = new_depth; ret = __bch2_fsck_write_inode(trans, &inode) ?: bch2_trans_commit(trans, NULL, NULL, 0); } err: bch2_trans_iter_exit(trans, &iter); return ret; } static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path, u32 snapshot, u32 new_bi_depth) { u32 restart_count = trans->restart_count; int ret = 0; darray_for_each_reverse(*path, i) { ret = nested_lockrestart_do(trans, bch2_bi_depth_renumber_one(trans, *i, snapshot, new_bi_depth)); bch_err_fn(trans->c, ret); if (ret) break; new_bi_depth++; } return ret ?: trans_was_restarted(trans, restart_count); } static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) { struct bch_fs *c = trans->c; struct btree_iter inode_iter = {}; darray_u64 path = {}; struct printbuf buf = PRINTBUF; u32 snapshot = inode_k.k->p.snapshot; bool redo_bi_depth = false; u32 min_bi_depth = U32_MAX; int ret = 0; struct bpos start = inode_k.k->p; struct bch_inode_unpacked inode; ret = bch2_inode_unpack(inode_k, &inode); if (ret) return ret; /* * If we're running full fsck, check_dirents() will have already ran, * and we shouldn't see any missing backpointers here - otherwise that's * handled separately, by check_unreachable_inodes */ while (!inode.bi_subvol && bch2_inode_has_backpointer(&inode)) { struct btree_iter dirent_iter; struct bkey_s_c_dirent d; d = dirent_get_by_pos(trans, &dirent_iter, SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot)); ret = bkey_err(d.s_c); if (ret && !bch2_err_matches(ret, ENOENT)) goto out; if (!ret && (ret = dirent_points_to_inode(c, d, &inode))) bch2_trans_iter_exit(trans, &dirent_iter); if (bch2_err_matches(ret, ENOENT)) { printbuf_reset(&buf); bch2_bkey_val_to_text(&buf, c, inode_k); bch_err(c, "unreachable inode in check_directory_structure: %s\n%s", bch2_err_str(ret), buf.buf); goto out; } bch2_trans_iter_exit(trans, &dirent_iter); ret = darray_push(&path, inode.bi_inum); if (ret) return ret; bch2_trans_iter_exit(trans, &inode_iter); inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, SPOS(0, inode.bi_dir, snapshot), 0); struct bch_inode_unpacked parent_inode; ret = bkey_err(inode_k) ?: !bkey_is_inode(inode_k.k) ? 
-BCH_ERR_ENOENT_inode : bch2_inode_unpack(inode_k, &parent_inode); if (ret) { /* Should have been caught in dirents pass */ bch_err_msg(c, ret, "error looking up parent directory"); goto out; } min_bi_depth = parent_inode.bi_depth; if (parent_inode.bi_depth < inode.bi_depth && min_bi_depth < U16_MAX) break; inode = parent_inode; redo_bi_depth = true; if (darray_find(path, inode.bi_inum)) { printbuf_reset(&buf); prt_printf(&buf, "directory structure loop in snapshot %u: ", snapshot); ret = bch2_inum_snapshot_to_path(trans, start.offset, start.snapshot, NULL, &buf); if (ret) goto out; if (c->opts.verbose) { prt_newline(&buf); darray_for_each(path, i) prt_printf(&buf, "%llu ", *i); } if (fsck_err(trans, dir_loop, "%s", buf.buf)) { ret = remove_backpointer(trans, &inode); bch_err_msg(c, ret, "removing dirent"); if (ret) goto out; ret = reattach_inode(trans, &inode); bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum); } goto out; } } if (inode.bi_subvol) min_bi_depth = 0; if (redo_bi_depth) ret = bch2_bi_depth_renumber(trans, &path, snapshot, min_bi_depth); out: fsck_err: bch2_trans_iter_exit(trans, &inode_iter); darray_exit(&path); printbuf_exit(&buf); bch_err_fn(c, ret); return ret; } /* * Check for loops in the directory structure: all other connectivity issues * have been fixed by prior passes */ int bch2_check_directory_structure(struct bch_fs *c) { int ret = bch2_trans_run(c, for_each_btree_key_reverse_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ if (!S_ISDIR(bkey_inode_mode(k))) continue; if (bch2_inode_flags(k) & BCH_INODE_unlinked) continue; check_path_loop(trans, k); }))); bch_err_fn(c, ret); return ret; } struct nlink_table { size_t nr; size_t size; struct nlink { u64 inum; u32 snapshot; u32 count; } *d; }; static int add_nlink(struct bch_fs *c, struct nlink_table *t, u64 inum, u32 snapshot) { if (t->nr == t->size) { size_t new_size = max_t(size_t, 128UL, t->size * 2); void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL); if (!d) { bch_err(c, "fsck: error allocating memory for nlink_table, size %zu", new_size); return bch_err_throw(c, ENOMEM_fsck_add_nlink); } if (t->d) memcpy(d, t->d, t->size * sizeof(t->d[0])); kvfree(t->d); t->d = d; t->size = new_size; } t->d[t->nr++] = (struct nlink) { .inum = inum, .snapshot = snapshot, }; return 0; } static int nlink_cmp(const void *_l, const void *_r) { const struct nlink *l = _l; const struct nlink *r = _r; return cmp_int(l->inum, r->inum); } static void inc_link(struct bch_fs *c, struct snapshots_seen *s, struct nlink_table *links, u64 range_start, u64 range_end, u64 inum, u32 snapshot) { struct nlink *link, key = { .inum = inum, .snapshot = U32_MAX, }; if (inum < range_start || inum >= range_end) return; link = __inline_bsearch(&key, links->d, links->nr, sizeof(links->d[0]), nlink_cmp); if (!link) return; while (link > links->d && link[0].inum == link[-1].inum) --link; for (; link < links->d + links->nr && link->inum == inum; link++) if (ref_visible(c, s, snapshot, link->snapshot)) { link->count++; if (link->snapshot >= snapshot) break; } } noinline_for_stack static int check_nlinks_find_hardlinks(struct bch_fs *c, struct nlink_table *t, u64 start, u64 *end) { int ret = bch2_trans_run(c, for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, start), BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, ({ if (!bkey_is_inode(k.k)) continue; /* Should never fail, checked by bch2_inode_invalid: 
*/ struct bch_inode_unpacked u; _ret3 = bch2_inode_unpack(k, &u); if (_ret3) break; /* * Backpointer and directory structure checks are sufficient for * directories, since they can't have hardlinks: */ if (S_ISDIR(u.bi_mode)) continue; /* * Previous passes ensured that bi_nlink is nonzero if * it had multiple hardlinks: */ if (!u.bi_nlink) continue; ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot); if (ret) { *end = k.k->p.offset; ret = 0; break; } 0; }))); bch_err_fn(c, ret); return ret; } noinline_for_stack static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, u64 range_start, u64 range_end) { struct snapshots_seen s; snapshots_seen_init(&s); int ret = bch2_trans_run(c, for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN, BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, ({ ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p); if (ret) break; if (k.k->type == KEY_TYPE_dirent) { struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); if (d.v->d_type != DT_DIR && d.v->d_type != DT_SUBVOL) inc_link(c, &s, links, range_start, range_end, le64_to_cpu(d.v->d_inum), d.k->p.snapshot); } 0; }))); snapshots_seen_exit(&s); bch_err_fn(c, ret); return ret; } static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, struct nlink_table *links, size_t *idx, u64 range_end) { struct bch_inode_unpacked u; struct nlink *link = &links->d[*idx]; int ret = 0; if (k.k->p.offset >= range_end) return 1; if (!bkey_is_inode(k.k)) return 0; ret = bch2_inode_unpack(k, &u); if (ret) return ret; if (S_ISDIR(u.bi_mode)) return 0; if (!u.bi_nlink) return 0; while ((cmp_int(link->inum, k.k->p.offset) ?: cmp_int(link->snapshot, k.k->p.snapshot)) < 0) { BUG_ON(*idx == links->nr); link = &links->d[++*idx]; } if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, trans, inode_wrong_nlink, "inode %llu type %s has wrong i_nlink (%u, should be %u)", u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)], bch2_inode_nlink_get(&u), link->count)) { bch2_inode_nlink_set(&u, link->count); ret = __bch2_fsck_write_inode(trans, &u); } fsck_err: return ret; } noinline_for_stack static int check_nlinks_update_hardlinks(struct bch_fs *c, struct nlink_table *links, u64 range_start, u64 range_end) { size_t idx = 0; int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS(0, range_start), BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end))); if (ret < 0) { bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret)); return ret; } return 0; } int bch2_check_nlinks(struct bch_fs *c) { struct nlink_table links = { 0 }; u64 this_iter_range_start, next_iter_range_start = 0; int ret = 0; do { this_iter_range_start = next_iter_range_start; next_iter_range_start = U64_MAX; ret = check_nlinks_find_hardlinks(c, &links, this_iter_range_start, &next_iter_range_start); ret = check_nlinks_walk_dirents(c, &links, this_iter_range_start, next_iter_range_start); if (ret) break; ret = check_nlinks_update_hardlinks(c, &links, this_iter_range_start, next_iter_range_start); if (ret) break; links.nr = 0; } while (next_iter_range_start != U64_MAX); kvfree(links.d); bch_err_fn(c, ret); return ret; } static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { struct bkey_s_c_reflink_p p; struct bkey_i_reflink_p *u; if (k.k->type != KEY_TYPE_reflink_p) return 0; p = 
bkey_s_c_to_reflink_p(k); if (!p.v->front_pad && !p.v->back_pad) return 0; u = bch2_trans_kmalloc(trans, sizeof(*u)); int ret = PTR_ERR_OR_ZERO(u); if (ret) return ret; bkey_reassemble(&u->k_i, k); u->v.front_pad = 0; u->v.back_pad = 0; return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun); } int bch2_fix_reflink_p(struct bch_fs *c) { if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) return 0; int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_extents, POS_MIN, BTREE_ITER_intent|BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, fix_reflink_p_key(trans, &iter, k))); bch_err_fn(c, ret); return ret; } #ifndef NO_BCACHEFS_CHARDEV struct fsck_thread { struct thread_with_stdio thr; struct bch_fs *c; struct bch_opts opts; }; static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) { struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); kfree(thr); } static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio) { struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); struct bch_fs *c = thr->c; int ret = PTR_ERR_OR_ZERO(c); if (ret) return ret; ret = bch2_fs_start(thr->c); if (ret) goto err; if (test_bit(BCH_FS_errors_fixed, &c->flags)) { bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); ret |= 1; } if (test_bit(BCH_FS_error, &c->flags)) { bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); ret |= 4; } err: bch2_fs_stop(c); return ret; } static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { .exit = bch2_fsck_thread_exit, .fn = bch2_fsck_offline_thread_fn, }; long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) { struct bch_ioctl_fsck_offline arg; struct fsck_thread *thr = NULL; darray_const_str devs = {}; long ret = 0; if (copy_from_user(&arg, user_arg, sizeof(arg))) return -EFAULT; if (arg.flags) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; for (size_t i = 0; i < arg.nr_devs; i++) { u64 dev_u64; ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); if (ret) goto err; char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); ret = PTR_ERR_OR_ZERO(dev_str); if (ret) goto err; ret = darray_push(&devs, dev_str); if (ret) { kfree(dev_str); goto err; } } thr = kzalloc(sizeof(*thr), GFP_KERNEL); if (!thr) { ret = -ENOMEM; goto err; } thr->opts = bch2_opts_empty(); if (arg.opts) { char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); ret = PTR_ERR_OR_ZERO(optstr) ?: bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr, false); if (!IS_ERR(optstr)) kfree(optstr); if (ret) goto err; } opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); opt_set(thr->opts, read_only, 1); opt_set(thr->opts, ratelimit_errors, 0); /* We need request_key() to be called before we punt to kthread: */ opt_set(thr->opts, nostart, true); bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); thr->c = bch2_fs_open(&devs, &thr->opts); if (!IS_ERR(thr->c) && thr->c->opts.errors == BCH_ON_ERROR_panic) thr->c->opts.errors = BCH_ON_ERROR_ro; ret = __bch2_run_thread_with_stdio(&thr->thr); out: darray_for_each(devs, i) kfree(*i); darray_exit(&devs); return ret; err: if (thr) bch2_fsck_thread_exit(&thr->thr); pr_err("ret %s", bch2_err_str(ret)); goto out; } static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) { struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); 
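/* redirect the filesystem's stdio to this thread, so fsck messages and fix-it prompts are passed through to the userspace caller: */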
struct bch_fs *c = thr->c; c->stdio_filter = current; c->stdio = &thr->thr.stdio; /* * XXX: can we figure out a way to do this without mucking with c->opts? */ unsigned old_fix_errors = c->opts.fix_errors; if (opt_defined(thr->opts, fix_errors)) c->opts.fix_errors = thr->opts.fix_errors; else c->opts.fix_errors = FSCK_FIX_ask; c->opts.fsck = true; set_bit(BCH_FS_in_fsck, &c->flags); int ret = bch2_run_online_recovery_passes(c, ~0ULL); clear_bit(BCH_FS_in_fsck, &c->flags); bch_err_fn(c, ret); c->stdio = NULL; c->stdio_filter = NULL; c->opts.fix_errors = old_fix_errors; up(&c->recovery.run_lock); bch2_ro_ref_put(c); return ret; } static const struct thread_with_stdio_ops bch2_online_fsck_ops = { .exit = bch2_fsck_thread_exit, .fn = bch2_fsck_online_thread_fn, }; long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg) { struct fsck_thread *thr = NULL; long ret = 0; if (arg.flags) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!bch2_ro_ref_tryget(c)) return -EROFS; if (down_trylock(&c->recovery.run_lock)) { bch2_ro_ref_put(c); return -EAGAIN; } thr = kzalloc(sizeof(*thr), GFP_KERNEL); if (!thr) { ret = -ENOMEM; goto err; } thr->c = c; thr->opts = bch2_opts_empty(); if (arg.opts) { char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); ret = PTR_ERR_OR_ZERO(optstr) ?: bch2_parse_mount_opts(c, &thr->opts, NULL, optstr, false); if (!IS_ERR(optstr)) kfree(optstr); if (ret) goto err; } ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); err: if (ret < 0) { bch_err_fn(c, ret); if (thr) bch2_fsck_thread_exit(&thr->thr); up(&c->recovery.run_lock); bch2_ro_ref_put(c); } return ret; } #endif /* NO_BCACHEFS_CHARDEV */ |
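The nlink pass earlier in this chunk works in three sweeps: check_nlinks_find_hardlinks() collects candidate inodes into a table, check_nlinks_walk_dirents() counts the dirents that point at each of them, and check_nlinks_update_inode() rewrites any inode whose stored i_nlink disagrees with that count. Below is a minimal userspace sketch of the same count-and-compare idea; it is not bcachefs code, and the in-memory arrays are hypothetical stand-ins for the btree walks.

/*
 * Hedged sketch, not bcachefs code: the count-and-compare scheme of
 * bch2_check_nlinks(), with demo arrays in place of the inode and
 * dirent btrees.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_inode  { unsigned long inum; unsigned int recorded_nlink; };
struct demo_dirent { unsigned long target_inum; };

int main(void)
{
    /* Pass 1: candidate inodes (nonzero nlink, not directories). */
    struct demo_inode inodes[] = {
        { .inum = 100, .recorded_nlink = 2 },
        { .inum = 101, .recorded_nlink = 3 },  /* stale on disk */
    };
    /* Pass 2: dirents; count references to each candidate. */
    struct demo_dirent dirents[] = {
        { 100 }, { 100 }, { 101 }, { 101 },
    };
    unsigned int counted[2] = { 0, 0 };

    for (size_t d = 0; d < sizeof(dirents) / sizeof(dirents[0]); d++)
        for (size_t i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
            if (dirents[d].target_inum == inodes[i].inum)
                counted[i]++;

    /* Pass 3: report mismatches (the real code rewrites the inode). */
    for (size_t i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
        if (inodes[i].recorded_nlink != counted[i])
            printf("inode %lu has wrong i_nlink (%u, should be %u)\n",
                   inodes[i].inum, inodes[i].recorded_nlink, counted[i]);
    return 0;
}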
/* SPDX-License-Identifier: GPL-2.0-only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM erofs

#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EROFS_H

#include <linux/tracepoint.h>
#include <linux/fs.h>

struct erofs_map_blocks;

#define show_dev(dev)        MAJOR(dev), MINOR(dev)
#define show_dev_nid(entry)  show_dev(entry->dev), entry->nid

#define show_file_type(type)            \
    __print_symbolic(type,              \
        { 0, "FILE" },                  \
        { 1, "DIR" })

#define show_map_flags(flags) __print_flags(flags, "|",    \
    { EROFS_GET_BLOCKS_FIEMAP,   "FIEMAP" },               \
    { EROFS_GET_BLOCKS_READMORE, "READMORE" },             \
    { EROFS_GET_BLOCKS_FINDTAIL, "FINDTAIL" })

#define show_mflags(flags) __print_flags(flags, "",    \
    { EROFS_MAP_MAPPED,      "M" },                    \
    { EROFS_MAP_META,        "I" },                    \
    { EROFS_MAP_ENCODED,     "E" },                    \
    { EROFS_MAP_FULL_MAPPED, "F" },                    \
    { EROFS_MAP_FRAGMENT,    "R" },                    \
    { EROFS_MAP_PARTIAL_REF, "P" })

TRACE_EVENT(erofs_lookup,

    TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags),

    TP_ARGS(dir, dentry, flags),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __string(name,         dentry->d_name.name)
        __field(unsigned int,  flags)
    ),

    TP_fast_assign(
        __entry->dev = dir->i_sb->s_dev;
        __entry->nid = EROFS_I(dir)->nid;
        __assign_str(name);
        __entry->flags = flags;
    ),

    TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
          show_dev_nid(__entry), __get_str(name), __entry->flags)
);

TRACE_EVENT(erofs_fill_inode,
    TP_PROTO(struct inode *inode),
    TP_ARGS(inode),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __field(erofs_blk_t,   blkaddr)
        __field(unsigned int,  ofs)
    ),

    TP_fast_assign(
        __entry->dev = inode->i_sb->s_dev;
        __entry->nid = EROFS_I(inode)->nid;
        __entry->blkaddr = erofs_blknr(inode->i_sb, erofs_iloc(inode));
        __entry->ofs = erofs_blkoff(inode->i_sb, erofs_iloc(inode));
    ),

    TP_printk("dev = (%d,%d), nid = %llu, blkaddr %llu ofs %u",
          show_dev_nid(__entry), __entry->blkaddr, __entry->ofs)
);

TRACE_EVENT(erofs_read_folio,

    TP_PROTO(struct folio *folio, bool raw),

    TP_ARGS(folio, raw),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __field(int,           dir)
        __field(pgoff_t,       index)
        __field(int,           uptodate)
        __field(bool,          raw)
    ),

    TP_fast_assign(
        __entry->dev = folio->mapping->host->i_sb->s_dev;
        __entry->nid = EROFS_I(folio->mapping->host)->nid;
        __entry->dir = S_ISDIR(folio->mapping->host->i_mode);
        __entry->index = folio->index;
        __entry->uptodate = folio_test_uptodate(folio);
        __entry->raw = raw;
    ),

    TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d "
          "raw = %d",
          show_dev_nid(__entry), show_file_type(__entry->dir),
          (unsigned long)__entry->index, __entry->uptodate, __entry->raw)
);

TRACE_EVENT(erofs_readahead,

    TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage,
         bool raw),

    TP_ARGS(inode, start, nrpage, raw),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __field(pgoff_t,       start)
        __field(unsigned int,  nrpage)
        __field(bool,          raw)
    ),

    TP_fast_assign(
        __entry->dev = inode->i_sb->s_dev;
        __entry->nid = EROFS_I(inode)->nid;
        __entry->start = start;
        __entry->nrpage = nrpage;
        __entry->raw = raw;
    ),

    TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d",
          show_dev_nid(__entry),
          (unsigned long)__entry->start, __entry->nrpage, __entry->raw)
);

TRACE_EVENT(erofs_map_blocks_enter,
    TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
         unsigned int flags),

    TP_ARGS(inode, map, flags),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __field(erofs_off_t,   la)
        __field(u64,           llen)
        __field(unsigned int,  flags)
    ),

    TP_fast_assign(
        __entry->dev = inode->i_sb->s_dev;
        __entry->nid = EROFS_I(inode)->nid;
        __entry->la = map->m_la;
        __entry->llen = map->m_llen;
        __entry->flags = flags;
    ),

    TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s",
          show_dev_nid(__entry), __entry->la, __entry->llen,
          __entry->flags ? show_map_flags(__entry->flags) : "NULL")
);

TRACE_EVENT(erofs_map_blocks_exit,
    TP_PROTO(struct inode *inode, struct erofs_map_blocks *map,
         unsigned int flags, int ret),

    TP_ARGS(inode, map, flags, ret),

    TP_STRUCT__entry(
        __field(dev_t,         dev)
        __field(erofs_nid_t,   nid)
        __field(unsigned int,  flags)
        __field(erofs_off_t,   la)
        __field(erofs_off_t,   pa)
        __field(u64,           llen)
        __field(u64,           plen)
        __field(unsigned int,  mflags)
        __field(int,           ret)
    ),

    TP_fast_assign(
        __entry->dev = inode->i_sb->s_dev;
        __entry->nid = EROFS_I(inode)->nid;
        __entry->flags = flags;
        __entry->la = map->m_la;
        __entry->pa = map->m_pa;
        __entry->llen = map->m_llen;
        __entry->plen = map->m_plen;
        __entry->mflags = map->m_flags;
        __entry->ret = ret;
    ),

    TP_printk("dev = (%d,%d), nid = %llu, flags %s "
          "la %llu pa %llu llen %llu plen %llu mflags %s ret %d",
          show_dev_nid(__entry),
          __entry->flags ? show_map_flags(__entry->flags) : "NULL",
          __entry->la, __entry->pa, __entry->llen, __entry->plen,
          show_mflags(__entry->mflags), __entry->ret)
);

#endif /* _TRACE_EROFS_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
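As a side note on the trace output format: show_mflags() above passes an empty separator to __print_flags(), so the label of every set flag is concatenated directly and a mapped+encoded extent prints as "ME". A small userspace sketch of that rendering follows; the bit values are illustrative only, not the real EROFS_MAP_* constants.

#include <stdio.h>

/* Illustrative bit values only; the real EROFS_MAP_* flags differ. */
#define DEMO_MAP_MAPPED   0x01  /* "M" */
#define DEMO_MAP_META     0x02  /* "I" */
#define DEMO_MAP_ENCODED  0x04  /* "E" */

static void demo_show_mflags(unsigned int flags, char *buf)
{
    static const struct { unsigned int bit; char c; } tbl[] = {
        { DEMO_MAP_MAPPED, 'M' },
        { DEMO_MAP_META, 'I' },
        { DEMO_MAP_ENCODED, 'E' },
    };
    size_t n = 0;

    /* Append the letter of every set bit, with no separator. */
    for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        if (flags & tbl[i].bit)
            buf[n++] = tbl[i].c;
    buf[n] = '\0';
}

int main(void)
{
    char buf[4];

    demo_show_mflags(DEMO_MAP_MAPPED | DEMO_MAP_ENCODED, buf);
    printf("mflags %s\n", buf);  /* prints "mflags ME" */
    return 0;
}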
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * PF_INET protocol family socket handler. * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Florian La Roche, <flla@stud.uni-sb.de> * Alan Cox, <A.Cox@swansea.ac.uk> * * Changes (see also sock.c) * * piggy, * Karl Knutson : Socket protocol table * A.N.Kuznetsov : Socket death error in accept(). * John Richardson : Fix non blocking error in connect() * so sockets that fail to connect * don't return -EINPROGRESS. * Alan Cox : Asynchronous I/O support * Alan Cox : Keep correct socket pointer on sock * structures * when accept() ed * Alan Cox : Semantics of SO_LINGER aren't state * moved to close when you look carefully. * With this fixed and the accept bug fixed * some RPC stuff seems happier. * Niibe Yutaka : 4.4BSD style write async I/O * Alan Cox, * Tony Gale : Fixed reuse semantics. * Alan Cox : bind() shouldn't abort existing but dead * sockets. Stops FTP netin:.. I hope. * Alan Cox : bind() works correctly for RAW sockets. * Note that FreeBSD at least was broken * in this respect so be careful with * compatibility tests... * Alan Cox : routing cache support * Alan Cox : memzero the socket structure for * compactness.
* Matt Day : nonblock connect error handler * Alan Cox : Allow large numbers of pending sockets * (eg for big web sites), but only if * specifically application requested. * Alan Cox : New buffering throughout IP. Used * dumbly. * Alan Cox : New buffering now used smartly. * Alan Cox : BSD rather than common sense * interpretation of listen. * Germano Caronni : Assorted small races. * Alan Cox : sendmsg/recvmsg basic support. * Alan Cox : Only sendmsg/recvmsg now supported. * Alan Cox : Locked down bind (see security list). * Alan Cox : Loosened bind a little. * Mike McLagan : ADD/DEL DLCI Ioctls * Willy Konynenberg : Transparent proxying support. * David S. Miller : New socket lookup architecture. * Some other random speedups. * Cyrus Durgin : Cleaned up file for kmod hacks. * Andi Kleen : Fix inet_stream_connect TCP race. */ #define pr_fmt(fmt) "IPv4: " fmt #include <linux/err.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/capability.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/netfilter_ipv4.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/inet.h> #include <linux/igmp.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <net/checksum.h> #include <net/ip.h> #include <net/protocol.h> #include <net/arp.h> #include <net/route.h> #include <net/ip_fib.h> #include <net/inet_connection_sock.h> #include <net/gro.h> #include <net/gso.h> #include <net/tcp.h> #include <net/udp.h> #include <net/udplite.h> #include <net/ping.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/raw.h> #include <net/icmp.h> #include <net/inet_common.h> #include <net/ip_tunnels.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/secure_seq.h> #ifdef CONFIG_IP_MROUTE #include <linux/mroute.h> #endif #include <net/l3mdev.h> #include <net/compat.h> #include <net/rps.h> #include <trace/events/sock.h> /* The inetsw table contains everything that inet_create needs to * build a new socket. */ static struct list_head inetsw[SOCK_MAX]; static DEFINE_SPINLOCK(inetsw_lock); /* New destruction routine */ void inet_sock_destruct(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); __skb_queue_purge(&sk->sk_receive_queue); __skb_queue_purge(&sk->sk_error_queue); sk_mem_reclaim_final(sk); if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { pr_err("Attempt to release TCP socket in state %d %p\n", sk->sk_state, sk); return; } if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive inet socket %p\n", sk); return; } WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc)); WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); WARN_ON_ONCE(sk->sk_wmem_queued); WARN_ON_ONCE(sk->sk_forward_alloc); kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1)); } EXPORT_SYMBOL(inet_sock_destruct); /* * The routines beyond this point handle the behaviour of an AF_INET * socket object. Mostly it punts to the subprotocols of IP to do * the work. */ /* * Automatically bind an unbound socket. 
*/ static int inet_autobind(struct sock *sk) { struct inet_sock *inet; /* We may need to bind the socket. */ lock_sock(sk); inet = inet_sk(sk); if (!inet->inet_num) { if (sk->sk_prot->get_port(sk, 0)) { release_sock(sk); return -EAGAIN; } inet->inet_sport = htons(inet->inet_num); } release_sock(sk); return 0; } int __inet_listen_sk(struct sock *sk, int backlog) { unsigned char old_state = sk->sk_state; int err, tcp_fastopen; if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN))) return -EINVAL; WRITE_ONCE(sk->sk_max_ack_backlog, backlog); /* Really, if the socket is already in listen state * we can only allow the backlog to be adjusted. */ if (old_state != TCP_LISTEN) { /* Enable TFO w/o requiring TCP_FASTOPEN socket option. * Note that only TCP sockets (SOCK_STREAM) will reach here. * Also fastopen backlog may already been set via the option * because the socket was in TCP_LISTEN state previously but * was shutdown() rather than close(). */ tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen); if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) && (tcp_fastopen & TFO_SERVER_ENABLE) && !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { fastopen_queue_tune(sk, backlog); tcp_fastopen_init_key_once(sock_net(sk)); } err = inet_csk_listen_start(sk); if (err) return err; tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL); } return 0; } /* * Move a socket into listening state. */ int inet_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = -EINVAL; lock_sock(sk); if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) goto out; err = __inet_listen_sk(sk, backlog); out: release_sock(sk); return err; } EXPORT_SYMBOL(inet_listen); /* * Create an inet socket. */ static int inet_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct inet_protosw *answer; struct inet_sock *inet; struct proto *answer_prot; unsigned char answer_flags; int try_loading_module = 0; int err; if (protocol < 0 || protocol >= IPPROTO_MAX) return -EINVAL; sock->state = SS_UNCONNECTED; /* Look for the requested type/protocol pair. */ lookup_protocol: err = -ESOCKTNOSUPPORT; rcu_read_lock(); list_for_each_entry_rcu(answer, &inetsw[sock->type], list) { err = 0; /* Check the non-wild match. */ if (protocol == answer->protocol) { if (protocol != IPPROTO_IP) break; } else { /* Check for the two wild cases. */ if (IPPROTO_IP == protocol) { protocol = answer->protocol; break; } if (IPPROTO_IP == answer->protocol) break; } err = -EPROTONOSUPPORT; } if (unlikely(err)) { if (try_loading_module < 2) { rcu_read_unlock(); /* * Be more specific, e.g. net-pf-2-proto-132-type-1 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM) */ if (++try_loading_module == 1) request_module("net-pf-%d-proto-%d-type-%d", PF_INET, protocol, sock->type); /* * Fall back to generic, e.g. 
net-pf-2-proto-132 * (net-pf-PF_INET-proto-IPPROTO_SCTP) */ else request_module("net-pf-%d-proto-%d", PF_INET, protocol); goto lookup_protocol; } else goto out_rcu_unlock; } err = -EPERM; if (sock->type == SOCK_RAW && !kern && !ns_capable(net->user_ns, CAP_NET_RAW)) goto out_rcu_unlock; sock->ops = answer->ops; answer_prot = answer->prot; answer_flags = answer->flags; rcu_read_unlock(); WARN_ON(!answer_prot->slab); err = -ENOMEM; sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern); if (!sk) goto out; err = 0; if (INET_PROTOSW_REUSE & answer_flags) sk->sk_reuse = SK_CAN_REUSE; if (INET_PROTOSW_ICSK & answer_flags) inet_init_csk_locks(sk); inet = inet_sk(sk); inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags); inet_clear_bit(NODEFRAG, sk); if (SOCK_RAW == sock->type) { inet->inet_num = protocol; if (IPPROTO_RAW == protocol) inet_set_bit(HDRINCL, sk); } if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) inet->pmtudisc = IP_PMTUDISC_DONT; else inet->pmtudisc = IP_PMTUDISC_WANT; atomic_set(&inet->inet_id, 0); sock_init_data(sock, sk); sk->sk_destruct = inet_sock_destruct; sk->sk_protocol = protocol; sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; sk->sk_txrehash = READ_ONCE(net->core.sysctl_txrehash); inet->uc_ttl = -1; inet_set_bit(MC_LOOP, sk); inet->mc_ttl = 1; inet_set_bit(MC_ALL, sk); inet->mc_index = 0; inet->mc_list = NULL; inet->rcv_tos = 0; if (inet->inet_num) { /* It assumes that any protocol which allows * the user to assign a number at socket * creation time automatically * shares. */ inet->inet_sport = htons(inet->inet_num); /* Add to protocol hash chains. */ err = sk->sk_prot->hash(sk); if (err) goto out_sk_release; } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); if (err) goto out_sk_release; } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); if (err) goto out_sk_release; } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; out_sk_release: sk_common_release(sk); sock->sk = NULL; goto out; } /* * The peer socket should always be NULL (or else). When we call this * function we are destroying the object and from then on nobody * should refer to it. */ int inet_release(struct socket *sock) { struct sock *sk = sock->sk; if (sk) { long timeout; if (!sk->sk_kern_sock) BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk); /* Applications forget to leave groups before exiting */ ip_mc_drop_socket(sk); /* If linger is set, we don't return until the close * is complete. Otherwise we return immediately. The * actually closing is done the same either way. * * If the close is due to the process exiting, we never * linger.. */ timeout = 0; if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING)) timeout = sk->sk_lingertime; sk->sk_prot->close(sk, timeout); sock->sk = NULL; } return 0; } EXPORT_SYMBOL(inet_release); int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len) { u32 flags = BIND_WITH_LOCK; int err; /* If the socket has its own bind function then use it. (RAW) */ if (sk->sk_prot->bind) { return sk->sk_prot->bind(sk, uaddr, addr_len); } if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; /* BPF prog is run before any checks are done so that if the prog * changes context in a wrong way it will be caught. 
*/ err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len, CGROUP_INET4_BIND, &flags); if (err) return err; return __inet_bind(sk, uaddr, addr_len, flags); } int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { return inet_bind_sk(sock->sk, uaddr, addr_len); } EXPORT_SYMBOL(inet_bind); int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, u32 flags) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); unsigned short snum; int chk_addr_ret; u32 tb_id = RT_TABLE_LOCAL; int err; if (addr->sin_family != AF_INET) { /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) * only if s_addr is INADDR_ANY. */ err = -EAFNOSUPPORT; if (addr->sin_family != AF_UNSPEC || addr->sin_addr.s_addr != htonl(INADDR_ANY)) goto out; } tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id); /* Not specified by any standard per-se, however it breaks too * many applications when removed. It is unfortunate since * allowing applications to make a non-local bind solves * several problems with systems using dynamic addressing. * (ie. your servers still start up even if your ISDN link * is temporarily down) */ err = -EADDRNOTAVAIL; if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr, chk_addr_ret)) goto out; snum = ntohs(addr->sin_port); err = -EACCES; if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) && snum && inet_port_requires_bind_service(net, snum) && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) goto out; /* We keep a pair of addresses. rcv_saddr is the one * used by hash lookups, and saddr is used for transmit. * * In the BSD API these are the same except where it * would be illegal to use them (multicast/broadcast) in * which case the sending device address is used. */ if (flags & BIND_WITH_LOCK) lock_sock(sk); /* Check these errors (active socket, double bind). */ err = -EINVAL; if (sk->sk_state != TCP_CLOSE || inet->inet_num) goto out_release_sock; inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ /* Make sure we are allowed to bind here. */ if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) || (flags & BIND_FORCE_ADDRESS_NO_PORT))) { err = sk->sk_prot->get_port(sk, snum); if (err) { inet->inet_saddr = inet->inet_rcv_saddr = 0; goto out_release_sock; } if (!(flags & BIND_FROM_BPF)) { err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk); if (err) { inet->inet_saddr = inet->inet_rcv_saddr = 0; if (sk->sk_prot->put_port) sk->sk_prot->put_port(sk); goto out_release_sock; } } } if (inet->inet_rcv_saddr) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet->inet_sport = htons(inet->inet_num); inet->inet_daddr = 0; inet->inet_dport = 0; sk_dst_reset(sk); err = 0; out_release_sock: if (flags & BIND_WITH_LOCK) release_sock(sk); out: return err; } int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; const struct proto *prot; int err; if (addr_len < sizeof(uaddr->sa_family)) return -EINVAL; /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ prot = READ_ONCE(sk->sk_prot); if (uaddr->sa_family == AF_UNSPEC) return prot->disconnect(sk, flags); if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) { err = prot->pre_connect(sk, uaddr, addr_len); if (err) return err; } if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk)) return -EAGAIN; return prot->connect(sk, uaddr, addr_len); } EXPORT_SYMBOL(inet_dgram_connect); static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) { DEFINE_WAIT_FUNC(wait, woken_wake_function); add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending += writebias; /* Basic assumption: if someone sets sk->sk_err, he _must_ * change state of the socket from TCP_SYN_*. * Connect() does not allow to get error notifications * without closing the socket. */ while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); if (signal_pending(current) || !timeo) break; } remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending -= writebias; return timeo; } /* * Connect to a remote host. There is regrettably still a little * TCP 'magic' in here. */ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags, int is_sendmsg) { struct sock *sk = sock->sk; int err; long timeo; /* * uaddr can be NULL and addr_len can be 0 if: * sk is a TCP fastopen active socket and * TCP_FASTOPEN_CONNECT sockopt is set and * we already have a valid cookie for this socket. * In this case, user can call write() after connect(). * write() will invoke tcp_sendmsg_fastopen() which calls * __inet_stream_connect(). */ if (uaddr) { if (addr_len < sizeof(uaddr->sa_family)) return -EINVAL; if (uaddr->sa_family == AF_UNSPEC) { sk->sk_disconnects++; err = sk->sk_prot->disconnect(sk, flags); sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; goto out; } } switch (sock->state) { default: err = -EINVAL; goto out; case SS_CONNECTED: err = -EISCONN; goto out; case SS_CONNECTING: if (inet_test_bit(DEFER_CONNECT, sk)) err = is_sendmsg ? -EINPROGRESS : -EISCONN; else err = -EALREADY; /* Fall out of switch with err, set for this state */ break; case SS_UNCONNECTED: err = -EISCONN; if (sk->sk_state != TCP_CLOSE) goto out; if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) { err = sk->sk_prot->pre_connect(sk, uaddr, addr_len); if (err) goto out; } err = sk->sk_prot->connect(sk, uaddr, addr_len); if (err < 0) goto out; sock->state = SS_CONNECTING; if (!err && inet_test_bit(DEFER_CONNECT, sk)) goto out; /* Just entered SS_CONNECTING state; the only * difference is that return value in non-blocking * case is EINPROGRESS, rather than EALREADY. */ err = -EINPROGRESS; break; } timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { int writebias = (sk->sk_protocol == IPPROTO_TCP) && tcp_sk(sk)->fastopen_req && tcp_sk(sk)->fastopen_req->data ? 1 : 0; int dis = sk->sk_disconnects; /* Error code is set above */ if (!timeo || !inet_wait_for_connect(sk, timeo, writebias)) goto out; err = sock_intr_errno(timeo); if (signal_pending(current)) goto out; if (dis != sk->sk_disconnects) { err = -EPIPE; goto out; } } /* Connection was closed by RST, timeout, ICMP error * or another process disconnected us. */ if (sk->sk_state == TCP_CLOSE) goto sock_error; /* sk->sk_err may be not zero now, if RECVERR was ordered by user * and error was received after socket entered established state. * Hence, it is handled normally after connect() return successfully. 
*/ sock->state = SS_CONNECTED; err = 0; out: return err; sock_error: err = sock_error(sk) ? : -ECONNABORTED; sock->state = SS_UNCONNECTED; sk->sk_disconnects++; if (sk->sk_prot->disconnect(sk, flags)) sock->state = SS_DISCONNECTING; goto out; } EXPORT_SYMBOL(__inet_stream_connect); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { int err; lock_sock(sock->sk); err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0); release_sock(sock->sk); return err; } EXPORT_SYMBOL(inet_stream_connect); void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk) { sock_rps_record_flow(newsk); WARN_ON(!((1 << newsk->sk_state) & (TCPF_ESTABLISHED | TCPF_SYN_RECV | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_CLOSE))); if (test_bit(SOCK_SUPPORT_ZC, &sock->flags)) set_bit(SOCK_SUPPORT_ZC, &newsock->flags); sock_graft(newsk, newsock); newsock->state = SS_CONNECTED; } /* * Accept a pending connection. The TCP layer now gives BSD semantics. */ int inet_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *sk1 = sock->sk, *sk2; /* IPV6_ADDRFORM can change sk->sk_prot under us. */ arg->err = -EINVAL; sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg); if (!sk2) return arg->err; lock_sock(sk2); __inet_accept(sock, newsock, sk2); release_sock(sk2); return 0; } EXPORT_SYMBOL(inet_accept); /* * This does both peername and sockname. */ int inet_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr); int sin_addr_len = sizeof(*sin); sin->sin_family = AF_INET; lock_sock(sk); if (peer) { if (!inet->inet_dport || (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && peer == 1)) { release_sock(sk); return -ENOTCONN; } sin->sin_port = inet->inet_dport; sin->sin_addr.s_addr = inet->inet_daddr; BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len, CGROUP_INET4_GETPEERNAME); } else { __be32 addr = inet->inet_rcv_saddr; if (!addr) addr = inet->inet_saddr; sin->sin_port = inet->inet_sport; sin->sin_addr.s_addr = addr; BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len, CGROUP_INET4_GETSOCKNAME); } release_sock(sk); memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); return sin_addr_len; } EXPORT_SYMBOL(inet_getname); int inet_send_prepare(struct sock *sk) { sock_rps_record_flow(sk); /* We may need to bind the socket. */ if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind && inet_autobind(sk)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(inet_send_prepare); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; if (unlikely(inet_send_prepare(sk))) return -EAGAIN; return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg, sk, msg, size); } EXPORT_SYMBOL(inet_sendmsg); void inet_splice_eof(struct socket *sock) { const struct proto *prot; struct sock *sk = sock->sk; if (unlikely(inet_send_prepare(sk))) return; /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ prot = READ_ONCE(sk->sk_prot); if (prot->splice_eof) prot->splice_eof(sock); } EXPORT_SYMBOL_GPL(inet_splice_eof); INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *, size_t, int, int *)); int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int addr_len = 0; int err; if (likely(!(flags & MSG_ERRQUEUE))) sock_rps_record_flow(sk); err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg, sk, msg, size, flags, &addr_len); if (err >= 0) msg->msg_namelen = addr_len; return err; } EXPORT_SYMBOL(inet_recvmsg); int inet_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int err = 0; /* This should really check to make sure * the socket is a TCP socket. (WHY AC...) */ how++; /* maps 0->1 has the advantage of making bit 1 rcvs and 1->2 bit 2 snds. 2->3 */ if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */ return -EINVAL; lock_sock(sk); if (sock->state == SS_CONNECTING) { if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)) sock->state = SS_DISCONNECTING; else sock->state = SS_CONNECTED; } switch (sk->sk_state) { case TCP_CLOSE: err = -ENOTCONN; /* Hack to wake up other listeners, who can poll for EPOLLHUP, even on eg. unconnected UDP sockets -- RR */ fallthrough; default: WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how); if (sk->sk_prot->shutdown) sk->sk_prot->shutdown(sk, how); break; /* Remaining two branches are temporary solution for missing * close() in multithreaded environment. It is _not_ a good idea, * but we have no choice until close() is repaired at VFS level. */ case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; fallthrough; case TCP_SYN_SENT: err = sk->sk_prot->disconnect(sk, O_NONBLOCK); sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; break; } /* Wake up anyone sleeping in poll. */ sk->sk_state_change(sk); release_sock(sk); return err; } EXPORT_SYMBOL(inet_shutdown); /* * ioctl() calls you can issue on an INET socket. Most of these are * device configuration and stuff and very rarely used. Some ioctls * pass on to the socket itself. * * NOTE: I like the idea of a module for the config stuff. ie ifconfig * loads the devconfigure module does its configuring and unloads it. * There's a good 20K of config code hanging around the kernel. 
*/ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; int err = 0; struct net *net = sock_net(sk); void __user *p = (void __user *)arg; struct ifreq ifr; struct rtentry rt; switch (cmd) { case SIOCADDRT: case SIOCDELRT: if (copy_from_user(&rt, p, sizeof(struct rtentry))) return -EFAULT; err = ip_rt_ioctl(net, cmd, &rt); break; case SIOCRTMSG: err = -EINVAL; break; case SIOCDARP: case SIOCGARP: case SIOCSARP: err = arp_ioctl(net, cmd, (void __user *)arg); break; case SIOCGIFADDR: case SIOCGIFBRDADDR: case SIOCGIFNETMASK: case SIOCGIFDSTADDR: case SIOCGIFPFLAGS: if (get_user_ifreq(&ifr, NULL, p)) return -EFAULT; err = devinet_ioctl(net, cmd, &ifr); if (!err && put_user_ifreq(&ifr, p)) err = -EFAULT; break; case SIOCSIFADDR: case SIOCSIFBRDADDR: case SIOCSIFNETMASK: case SIOCSIFDSTADDR: case SIOCSIFPFLAGS: case SIOCSIFFLAGS: if (get_user_ifreq(&ifr, NULL, p)) return -EFAULT; err = devinet_ioctl(net, cmd, &ifr); break; default: if (sk->sk_prot->ioctl) err = sk_ioctl(sk, cmd, (void __user *)arg); else err = -ENOIOCTLCMD; break; } return err; } EXPORT_SYMBOL(inet_ioctl); #ifdef CONFIG_COMPAT static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd, struct compat_rtentry __user *ur) { compat_uptr_t rtdev; struct rtentry rt; if (copy_from_user(&rt.rt_dst, &ur->rt_dst, 3 * sizeof(struct sockaddr)) || get_user(rt.rt_flags, &ur->rt_flags) || get_user(rt.rt_metric, &ur->rt_metric) || get_user(rt.rt_mtu, &ur->rt_mtu) || get_user(rt.rt_window, &ur->rt_window) || get_user(rt.rt_irtt, &ur->rt_irtt) || get_user(rtdev, &ur->rt_dev)) return -EFAULT; rt.rt_dev = compat_ptr(rtdev); return ip_rt_ioctl(sock_net(sk), cmd, &rt); } static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; switch (cmd) { case SIOCADDRT: case SIOCDELRT: return inet_compat_routing_ioctl(sk, cmd, argp); default: if (!sk->sk_prot->compat_ioctl) return -ENOIOCTLCMD; return sk->sk_prot->compat_ioctl(sk, cmd, arg); } } #endif /* CONFIG_COMPAT */ const struct proto_ops inet_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet_getname, .poll = tcp_poll, .ioctl = inet_ioctl, .gettstamp = sock_gettstamp, .listen = inet_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, #ifdef CONFIG_MMU .mmap = tcp_mmap, #endif .splice_eof = inet_splice_eof, .splice_read = tcp_splice_read, .set_peek_off = sk_set_peek_off, .read_sock = tcp_read_sock, .read_skb = tcp_read_skb, .sendmsg_locked = tcp_sendmsg_locked, .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, #endif .set_rcvlowat = tcp_set_rcvlowat, }; EXPORT_SYMBOL(inet_stream_ops); const struct proto_ops inet_dgram_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, .poll = udp_poll, .ioctl = inet_ioctl, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .read_skb = udp_read_skb, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .splice_eof = inet_splice_eof, .set_peek_off = 
udp_set_peek_off, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, #endif }; EXPORT_SYMBOL(inet_dgram_ops); /* * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without * udp_poll */ static const struct proto_ops inet_sockraw_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = inet_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = inet_getname, .poll = datagram_poll, .ioctl = inet_ioctl, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .splice_eof = inet_splice_eof, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, #endif }; static const struct net_proto_family inet_family_ops = { .family = PF_INET, .create = inet_create, .owner = THIS_MODULE, }; /* Upon startup we insert all the elements in inetsw_array[] into * the linked list inetsw. */ static struct inet_protosw inetsw_array[] = { { .type = SOCK_STREAM, .protocol = IPPROTO_TCP, .prot = &tcp_prot, .ops = &inet_stream_ops, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }, { .type = SOCK_DGRAM, .protocol = IPPROTO_UDP, .prot = &udp_prot, .ops = &inet_dgram_ops, .flags = INET_PROTOSW_PERMANENT, }, { .type = SOCK_DGRAM, .protocol = IPPROTO_ICMP, .prot = &ping_prot, .ops = &inet_sockraw_ops, .flags = INET_PROTOSW_REUSE, }, { .type = SOCK_RAW, .protocol = IPPROTO_IP, /* wild card */ .prot = &raw_prot, .ops = &inet_sockraw_ops, .flags = INET_PROTOSW_REUSE, } }; #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array) void inet_register_protosw(struct inet_protosw *p) { struct list_head *lh; struct inet_protosw *answer; int protocol = p->protocol; struct list_head *last_perm; spin_lock_bh(&inetsw_lock); if (p->type >= SOCK_MAX) goto out_illegal; /* If we are trying to override a permanent protocol, bail. */ last_perm = &inetsw[p->type]; list_for_each(lh, &inetsw[p->type]) { answer = list_entry(lh, struct inet_protosw, list); /* Check only the non-wild match. */ if ((INET_PROTOSW_PERMANENT & answer->flags) == 0) break; if (protocol == answer->protocol) goto out_permanent; last_perm = lh; } /* Add the new entry after the last permanent entry if any, so that * the new entry does not override a permanent entry when matched with * a wild-card protocol. But it is allowed to override any existing * non-permanent entry. This means that when we remove this entry, the * system automatically returns to the old behavior. 
*/ list_add_rcu(&p->list, last_perm); out: spin_unlock_bh(&inetsw_lock); return; out_permanent: pr_err("Attempt to override permanent protocol %d\n", protocol); goto out; out_illegal: pr_err("Ignoring attempt to register invalid socket type %d\n", p->type); goto out; } EXPORT_SYMBOL(inet_register_protosw); void inet_unregister_protosw(struct inet_protosw *p) { if (INET_PROTOSW_PERMANENT & p->flags) { pr_err("Attempt to unregister permanent protocol %d\n", p->protocol); } else { spin_lock_bh(&inetsw_lock); list_del_rcu(&p->list); spin_unlock_bh(&inetsw_lock); synchronize_net(); } } EXPORT_SYMBOL(inet_unregister_protosw); static int inet_sk_reselect_saddr(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); __be32 old_saddr = inet->inet_saddr; __be32 daddr = inet->inet_daddr; struct flowi4 *fl4; struct rtable *rt; __be32 new_saddr; struct ip_options_rcu *inet_opt; int err; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; /* Query new route. */ fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if, sk->sk_protocol, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) return PTR_ERR(rt); new_saddr = fl4->saddr; if (new_saddr == old_saddr) { sk_setup_caps(sk, &rt->dst); return 0; } err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET); if (err) { ip_rt_put(rt); return err; } sk_setup_caps(sk, &rt->dst); if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) { pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", __func__, &old_saddr, &new_saddr); } /* * XXX The only one ugly spot where we need to * XXX really change the sockets identity after * XXX it has entered the hashes. -DaveM * * Besides that, it does not check for connection * uniqueness. Wait for troubles. */ return __sk_prot_rehash(sk); } int inet_sk_rebuild_header(struct sock *sk) { struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0)); struct inet_sock *inet = inet_sk(sk); struct flowi4 *fl4; int err; /* Route is OK, nothing to do. */ if (rt) return 0; /* Reroute. */ fl4 = &inet->cork.fl.u.ip4; inet_sk_init_flowi4(inet, fl4); rt = ip_route_output_flow(sock_net(sk), fl4, sk); if (!IS_ERR(rt)) { err = 0; sk_setup_caps(sk, &rt->dst); } else { err = PTR_ERR(rt); /* Routing failed... 
*/ sk->sk_route_caps = 0; if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) || sk->sk_state != TCP_SYN_SENT || (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || (err = inet_sk_reselect_saddr(sk)) != 0) WRITE_ONCE(sk->sk_err_soft, -err); } return err; } EXPORT_SYMBOL(inet_sk_rebuild_header); void inet_sk_set_state(struct sock *sk, int state) { trace_inet_sock_set_state(sk, sk->sk_state, state); sk->sk_state = state; } EXPORT_SYMBOL(inet_sk_set_state); void inet_sk_state_store(struct sock *sk, int newstate) { trace_inet_sock_set_state(sk, sk->sk_state, newstate); smp_store_release(&sk->sk_state, newstate); } struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { bool udpfrag = false, fixedid = false, gso_partial, encap; struct sk_buff *segs = ERR_PTR(-EINVAL); const struct net_offload *ops; unsigned int offset = 0; struct iphdr *iph; int proto, tot_len; int nhoff; int ihl; int id; skb_reset_network_header(skb); nhoff = skb_network_header(skb) - skb_mac_header(skb); if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) goto out; iph = ip_hdr(skb); ihl = iph->ihl * 4; if (ihl < sizeof(*iph)) goto out; id = ntohs(iph->id); proto = iph->protocol; /* Warning: after this point, iph might be no longer valid */ if (unlikely(!pskb_may_pull(skb, ihl))) goto out; __skb_pull(skb, ihl); encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += ihl; skb_reset_transport_header(skb); segs = ERR_PTR(-EPROTONOSUPPORT); if (!skb->encapsulation || encap) { udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); /* fixed ID is invalid if DF bit is not set */ if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF))) goto out; } ops = rcu_dereference(inet_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { segs = ops->callbacks.gso_segment(skb, features); if (!segs) skb->network_header = skb_mac_header(skb) + nhoff - skb->head; } if (IS_ERR_OR_NULL(segs)) goto out; gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); skb = segs; do { iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); if (udpfrag) { iph->frag_off = htons(offset >> 3); if (skb->next) iph->frag_off |= htons(IP_MF); offset += skb->len - nhoff - ihl; tot_len = skb->len - nhoff; } else if (skb_is_gso(skb)) { if (!fixedid) { iph->id = htons(id); id += skb_shinfo(skb)->gso_segs; } if (gso_partial) tot_len = skb_shinfo(skb)->gso_size + SKB_GSO_CB(skb)->data_offset + skb->head - (unsigned char *)iph; else tot_len = skb->len - nhoff; } else { if (!fixedid) iph->id = htons(id++); tot_len = skb->len - nhoff; } iph->tot_len = htons(tot_len); ip_send_check(iph); if (encap) skb_reset_inner_headers(skb); skb->network_header = (u8 *)iph - skb->head; skb_reset_mac_len(skb); } while ((skb = skb->next)); out: return segs; } static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, netdev_features_t features) { if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4)) return ERR_PTR(-EINVAL); return inet_gso_segment(skb, features); } struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; struct sk_buff *pp = NULL; const struct iphdr *iph; struct sk_buff *p; unsigned int hlen; unsigned int off; int flush = 1; int proto; off = skb_gro_offset(skb); hlen = off + sizeof(*iph); iph = skb_gro_header(skb, hlen, off); if (unlikely(!iph)) goto out; proto = iph->protocol; ops = rcu_dereference(inet_offloads[proto]); if (!ops || !ops->callbacks.gro_receive) 
goto out; if (*(u8 *)iph != 0x45) goto out; if (ip_is_fragment(iph)) goto out; if (unlikely(ip_fast_csum((u8 *)iph, 5))) goto out; NAPI_GRO_CB(skb)->proto = proto; flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (ntohl(*(__be32 *)&iph->id) & ~IP_DF)); list_for_each_entry(p, head, list) { struct iphdr *iph2; if (!NAPI_GRO_CB(p)->same_flow) continue; iph2 = (struct iphdr *)(p->data + off); /* The above works because, with the exception of the top * (inner most) layer, we only aggregate pkts with the same * hdr length so all the hdrs we'll need to verify will start * at the same offset. */ if ((iph->protocol ^ iph2->protocol) | ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } } NAPI_GRO_CB(skb)->flush |= flush; NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off; /* Note : No need to call skb_gro_postpull_rcsum() here, * as we already checked checksum over ipv4 header was 0 */ skb_gro_pull(skb, sizeof(*iph)); skb_set_transport_header(skb, skb_gro_offset(skb)); pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive, ops->callbacks.gro_receive, head, skb); out: skb_gro_flush_final(skb, pp, flush); return pp; } static struct sk_buff *ipip_gro_receive(struct list_head *head, struct sk_buff *skb) { if (NAPI_GRO_CB(skb)->encap_mark) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } NAPI_GRO_CB(skb)->encap_mark = 1; return inet_gro_receive(head, skb); } #define SECONDS_PER_DAY 86400 /* inet_current_timestamp - Return IP network timestamp * * Return milliseconds since midnight in network byte order. */ __be32 inet_current_timestamp(void) { u32 secs; u32 msecs; struct timespec64 ts; ktime_get_real_ts64(&ts); /* Get secs since midnight. */ (void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs); /* Convert to msecs. */ msecs = secs * MSEC_PER_SEC; /* Convert nsec to msec. */ msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC; /* Convert to network byte order. */ return htonl(msecs); } EXPORT_SYMBOL(inet_current_timestamp); int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { unsigned int family = READ_ONCE(sk->sk_family); if (family == AF_INET) return ip_recv_error(sk, msg, len, addr_len); #if IS_ENABLED(CONFIG_IPV6) if (family == AF_INET6) return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); #endif return -EINVAL; } EXPORT_SYMBOL(inet_recv_error); int inet_gro_complete(struct sk_buff *skb, int nhoff) { struct iphdr *iph = (struct iphdr *)(skb->data + nhoff); const struct net_offload *ops; __be16 totlen = iph->tot_len; int proto = iph->protocol; int err = -ENOSYS; if (skb->encapsulation) { skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP)); skb_set_inner_network_header(skb, nhoff); } iph_set_totlen(iph, skb->len - nhoff); csum_replace2(&iph->check, totlen, iph->tot_len); ops = rcu_dereference(inet_offloads[proto]); if (WARN_ON(!ops || !ops->callbacks.gro_complete)) goto out; /* Only need to add sizeof(*iph) to get to the next hdr below * because any hdr with option will have been flushed in * inet_gro_receive(). 
*/ err = INDIRECT_CALL_2(ops->callbacks.gro_complete, tcp4_gro_complete, udp4_gro_complete, skb, nhoff + sizeof(*iph)); out: return err; } static int ipip_gro_complete(struct sk_buff *skb, int nhoff) { skb->encapsulation = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4; return inet_gro_complete(skb, nhoff); } int inet_ctl_sock_create(struct sock **sk, unsigned short family, unsigned short type, unsigned char protocol, struct net *net) { struct socket *sock; int rc = sock_create_kern(net, family, type, protocol, &sock); if (rc == 0) { *sk = sock->sk; (*sk)->sk_allocation = GFP_ATOMIC; (*sk)->sk_use_task_frag = false; /* * Unhash it so that IP input processing does not even see it, * we do not wish this socket to see incoming packets. */ (*sk)->sk_prot->unhash(*sk); } return rc; } EXPORT_SYMBOL_GPL(inet_ctl_sock_create); unsigned long snmp_fold_field(void __percpu *mib, int offt) { unsigned long res = 0; int i; for_each_possible_cpu(i) res += snmp_get_cpu_field(mib, i, offt); return res; } EXPORT_SYMBOL_GPL(snmp_fold_field); #if BITS_PER_LONG==32 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt, size_t syncp_offset) { void *bhptr; struct u64_stats_sync *syncp; u64 v; unsigned int start; bhptr = per_cpu_ptr(mib, cpu); syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); do { start = u64_stats_fetch_begin(syncp); v = *(((u64 *)bhptr) + offt); } while (u64_stats_fetch_retry(syncp, start)); return v; } EXPORT_SYMBOL_GPL(snmp_get_cpu_field64); u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset) { u64 res = 0; int cpu; for_each_possible_cpu(cpu) { res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset); } return res; } EXPORT_SYMBOL_GPL(snmp_fold_field64); #endif #ifdef CONFIG_IP_MULTICAST static const struct net_protocol igmp_protocol = { .handler = igmp_rcv, }; #endif static const struct net_protocol icmp_protocol = { .handler = icmp_rcv, .err_handler = icmp_err, .no_policy = 1, }; static __net_init int ipv4_mib_init_net(struct net *net) { int i; net->mib.tcp_statistics = alloc_percpu(struct tcp_mib); if (!net->mib.tcp_statistics) goto err_tcp_mib; net->mib.ip_statistics = alloc_percpu(struct ipstats_mib); if (!net->mib.ip_statistics) goto err_ip_mib; for_each_possible_cpu(i) { struct ipstats_mib *af_inet_stats; af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i); u64_stats_init(&af_inet_stats->syncp); } net->mib.net_statistics = alloc_percpu(struct linux_mib); if (!net->mib.net_statistics) goto err_net_mib; net->mib.udp_statistics = alloc_percpu(struct udp_mib); if (!net->mib.udp_statistics) goto err_udp_mib; net->mib.udplite_statistics = alloc_percpu(struct udp_mib); if (!net->mib.udplite_statistics) goto err_udplite_mib; net->mib.icmp_statistics = alloc_percpu(struct icmp_mib); if (!net->mib.icmp_statistics) goto err_icmp_mib; net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib), GFP_KERNEL); if (!net->mib.icmpmsg_statistics) goto err_icmpmsg_mib; tcp_mib_init(net); return 0; err_icmpmsg_mib: free_percpu(net->mib.icmp_statistics); err_icmp_mib: free_percpu(net->mib.udplite_statistics); err_udplite_mib: free_percpu(net->mib.udp_statistics); err_udp_mib: free_percpu(net->mib.net_statistics); err_net_mib: free_percpu(net->mib.ip_statistics); err_ip_mib: free_percpu(net->mib.tcp_statistics); err_tcp_mib: return -ENOMEM; } static __net_exit void ipv4_mib_exit_net(struct net *net) { kfree(net->mib.icmpmsg_statistics); free_percpu(net->mib.icmp_statistics); free_percpu(net->mib.udplite_statistics); free_percpu(net->mib.udp_statistics); 
free_percpu(net->mib.net_statistics); free_percpu(net->mib.ip_statistics); free_percpu(net->mib.tcp_statistics); #ifdef CONFIG_MPTCP /* allocated on demand, see mptcp_init_sock() */ free_percpu(net->mib.mptcp_statistics); #endif } static __net_initdata struct pernet_operations ipv4_mib_ops = { .init = ipv4_mib_init_net, .exit = ipv4_mib_exit_net, }; static int __init init_ipv4_mibs(void) { return register_pernet_subsys(&ipv4_mib_ops); } static __net_init int inet_init_net(struct net *net) { /* * Set defaults for local port range */ net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u; seqlock_init(&net->ipv4.ping_group_range.lock); /* * Sane defaults - nobody may create ping sockets. * Boot scripts should set this to distro-specific group. */ net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); /* Default values for sysctl-controlled parameters. * We set them here, in case sysctl is not compiled. */ net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; net->ipv4.sysctl_ip_fwd_update_priority = 1; net->ipv4.sysctl_ip_dynaddr = 0; net->ipv4.sysctl_ip_early_demux = 1; net->ipv4.sysctl_udp_early_demux = 1; net->ipv4.sysctl_tcp_early_demux = 1; net->ipv4.sysctl_nexthop_compat_mode = 1; #ifdef CONFIG_SYSCTL net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; #endif /* Some igmp sysctl, whose values are always used */ net->ipv4.sysctl_igmp_max_memberships = 20; net->ipv4.sysctl_igmp_max_msf = 10; /* IGMP reports for link-local multicast groups are enabled by default */ net->ipv4.sysctl_igmp_llm_reports = 1; net->ipv4.sysctl_igmp_qrv = 2; net->ipv4.sysctl_fib_notify_on_flag_change = 0; return 0; } static __net_initdata struct pernet_operations af_inet_ops = { .init = inet_init_net, }; static int __init init_inet_pernet_ops(void) { return register_pernet_subsys(&af_inet_ops); } static int ipv4_proc_init(void); /* * IP protocol layer initialiser */ static const struct net_offload ipip_offload = { .callbacks = { .gso_segment = ipip_gso_segment, .gro_receive = ipip_gro_receive, .gro_complete = ipip_gro_complete, }, }; static int __init ipip_offload_init(void) { return inet_add_offload(&ipip_offload, IPPROTO_IPIP); } static int __init ipv4_offload_init(void) { /* * Add offloads */ if (udpv4_offload_init() < 0) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (tcpv4_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); if (ipip_offload_init() < 0) pr_crit("%s: Cannot add IPIP protocol offload\n", __func__); net_hotdata.ip_packet_offload = (struct packet_offload) { .type = cpu_to_be16(ETH_P_IP), .callbacks = { .gso_segment = inet_gso_segment, .gro_receive = inet_gro_receive, .gro_complete = inet_gro_complete, }, }; dev_add_offload(&net_hotdata.ip_packet_offload); return 0; } fs_initcall(ipv4_offload_init); static struct packet_type ip_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_IP), .func = ip_rcv, .list_func = ip_list_rcv, }; static int __init inet_init(void) { struct inet_protosw *q; struct list_head *r; int rc; sock_skb_cb_check_size(sizeof(struct inet_skb_parm)); raw_hashinfo_init(&raw_v4_hashinfo); rc = proto_register(&tcp_prot, 1); if (rc) goto out; rc = proto_register(&udp_prot, 1); if (rc) goto out_unregister_tcp_proto; rc = proto_register(&raw_prot, 1); if (rc) goto out_unregister_udp_proto; rc = proto_register(&ping_prot, 1); if (rc) goto out_unregister_raw_proto; /* * Tell SOCKET that we are alive... 
*/ (void)sock_register(&inet_family_ops); #ifdef CONFIG_SYSCTL ip_static_sysctl_init(); #endif /* * Add all the base protocols. */ if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0) pr_crit("%s: Cannot add ICMP protocol\n", __func__); net_hotdata.udp_protocol = (struct net_protocol) { .handler = udp_rcv, .err_handler = udp_err, .no_policy = 1, }; if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0) pr_crit("%s: Cannot add UDP protocol\n", __func__); net_hotdata.tcp_protocol = (struct net_protocol) { .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, .no_policy = 1, .icmp_strict_tag_validation = 1, }; if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0) pr_crit("%s: Cannot add TCP protocol\n", __func__); #ifdef CONFIG_IP_MULTICAST if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0) pr_crit("%s: Cannot add IGMP protocol\n", __func__); #endif /* Register the socket-side information for inet_create. */ for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q) inet_register_protosw(q); /* * Set the ARP module up */ arp_init(); /* * Set the IP module up */ ip_init(); /* Initialise per-cpu ipv4 mibs */ if (init_ipv4_mibs()) panic("%s: Cannot init ipv4 mibs\n", __func__); /* Setup TCP slab cache for open requests. */ tcp_init(); /* Setup UDP memory threshold */ udp_init(); /* Add UDP-Lite (RFC 3828) */ udplite4_register(); raw_init(); ping_init(); /* * Set the ICMP layer up */ if (icmp_init() < 0) panic("Failed to create the ICMP control socket.\n"); /* * Initialise the multicast router */ #if defined(CONFIG_IP_MROUTE) if (ip_mr_init()) pr_crit("%s: Cannot init ipv4 mroute\n", __func__); #endif if (init_inet_pernet_ops()) pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__); ipv4_proc_init(); ipfrag_init(); dev_add_pack(&ip_packet_type); ip_tunnel_core_init(); rc = 0; out: return rc; out_unregister_raw_proto: proto_unregister(&raw_prot); out_unregister_udp_proto: proto_unregister(&udp_prot); out_unregister_tcp_proto: proto_unregister(&tcp_prot); goto out; } fs_initcall(inet_init); /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS static int __init ipv4_proc_init(void) { int rc = 0; if (raw_proc_init()) goto out_raw; if (tcp4_proc_init()) goto out_tcp; if (udp4_proc_init()) goto out_udp; if (ping_proc_init()) goto out_ping; if (ip_misc_proc_init()) goto out_misc; out: return rc; out_misc: ping_proc_exit(); out_ping: udp4_proc_exit(); out_udp: tcp4_proc_exit(); out_tcp: raw_proc_exit(); out_raw: rc = -ENOMEM; goto out; } #else /* CONFIG_PROC_FS */ static int __init ipv4_proc_init(void) { return 0; } #endif /* CONFIG_PROC_FS */ |
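A minimal userspace sketch of the same milliseconds-since-midnight arithmetic that inet_current_timestamp() performs above (seconds-of-day * 1000 + nanoseconds / 1e6, converted to network byte order). This is illustrative only, assuming POSIX clock_gettime(); none of the names below are kernel API.

/*
 * Hypothetical userspace restatement of the inet_current_timestamp()
 * calculation; ip_timestamp_msec_be() is an illustrative name, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <arpa/inet.h>

#define SECONDS_PER_DAY 86400

static uint32_t ip_timestamp_msec_be(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);

        /* seconds since midnight (UTC), then scaled to milliseconds */
        uint32_t secs  = (uint32_t)(ts.tv_sec % SECONDS_PER_DAY);
        uint32_t msecs = secs * 1000u + (uint32_t)(ts.tv_nsec / 1000000L);

        /* network byte order, as the IP timestamp option expects */
        return htonl(msecs);
}

int main(void)
{
        printf("IP timestamp: %u ms since midnight\n",
               ntohl(ip_timestamp_msec_be()));
        return 0;
}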
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LIST_BL_H
#define _LINUX_LIST_BL_H

#include <linux/list.h>
#include <linux/bit_spinlock.h>

/*
 * Special version of lists, where head of the list has a lock in the lowest
 * bit. This is useful for scalable hash tables without increasing memory
 * footprint overhead.
 *
 * For modification operations, the 0 bit of hlist_bl_head->first
 * pointer must be set.
 *
 * With some small modifications, this can easily be adapted to store several
 * arbitrary bits (not just a single lock bit), if the need arises to store
 * some fast and compact auxiliary data.
 */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK        1UL
#else
#define LIST_BL_LOCKMASK        0UL
#endif

#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif

struct hlist_bl_head {
        struct hlist_bl_node *first;
};

struct hlist_bl_node {
        struct hlist_bl_node *next, **pprev;
};

#define INIT_HLIST_BL_HEAD(ptr) \
        ((ptr)->first = NULL)

static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
        h->next = NULL;
        h->pprev = NULL;
}

#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)

static inline bool hlist_bl_unhashed(const struct hlist_bl_node *h)
{
        return !h->pprev;
}

static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
{
        return (struct hlist_bl_node *)
                ((unsigned long)h->first & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_set_first(struct hlist_bl_head *h,
                                      struct hlist_bl_node *n)
{
        LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
        LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
                                                        LIST_BL_LOCKMASK);
        h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}

static inline bool hlist_bl_empty(const struct hlist_bl_head *h)
{
        return !((unsigned long)READ_ONCE(h->first) & ~LIST_BL_LOCKMASK);
}

static inline void hlist_bl_add_head(struct hlist_bl_node *n,
                                     struct hlist_bl_head *h)
{
        struct hlist_bl_node *first = hlist_bl_first(h);

        n->next = first;
        if (first)
                first->pprev = &n->next;
        n->pprev = &h->first;
        hlist_bl_set_first(h, n);
}

static inline void hlist_bl_add_before(struct hlist_bl_node *n,
                                       struct hlist_bl_node *next)
{
        struct hlist_bl_node **pprev = next->pprev;

        n->pprev = pprev;
        n->next = next;
        next->pprev = &n->next;

        /* pprev may be `first`, so be careful not to lose the lock bit */
        WRITE_ONCE(*pprev,
                   (struct hlist_bl_node *)
                        ((uintptr_t)n |
                         ((uintptr_t)*pprev & LIST_BL_LOCKMASK)));
}

static inline void hlist_bl_add_behind(struct hlist_bl_node *n,
                                       struct hlist_bl_node *prev)
{
        n->next = prev->next;
        n->pprev = &prev->next;
        prev->next = n;

        if (n->next)
                n->next->pprev = &n->next;
}

static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
        struct hlist_bl_node *next = n->next;
        struct hlist_bl_node **pprev = n->pprev;

        LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
        /* pprev may be `first`, so be careful not to lose the lock bit */
        WRITE_ONCE(*pprev,
                   (struct hlist_bl_node *)
                        ((unsigned long)next |
                         ((unsigned long)*pprev & LIST_BL_LOCKMASK)));

        if (next)
                next->pprev = pprev;
}

static inline void hlist_bl_del(struct hlist_bl_node *n)
{
        __hlist_bl_del(n);
        n->next = LIST_POISON1;
        n->pprev = LIST_POISON2;
}

static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
        if (!hlist_bl_unhashed(n)) {
                __hlist_bl_del(n);
                INIT_HLIST_BL_NODE(n);
        }
}

static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
        bit_spin_lock(0, (unsigned long *)b);
}

static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
        __bit_spin_unlock(0, (unsigned long *)b);
}

static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
        return bit_spin_is_locked(0, (unsigned long *)b);
}

/**
 * hlist_bl_for_each_entry - iterate over list of given type
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 *
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)                \
        for (pos = hlist_bl_first(head);                                \
             pos &&                                                     \
                ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
             pos = pos->next)

/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @n:          another &struct hlist_node to use as temporary storage
 * @head:       the head for your list.
 * @member:     the name of the hlist_node within the struct.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)        \
        for (pos = hlist_bl_first(head);                                \
             pos && ({ n = pos->next; 1; }) &&                          \
                ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
             pos = n)

#endif
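A minimal usage sketch of the bit-locked list head, assuming kernel context: each hash bucket is one hlist_bl_head, and hlist_bl_lock() takes the bit spinlock stored in bit 0 of ->first, so no separate per-bucket lock array is needed. This is not part of the header above; struct cache_entry, table, cache_insert() and cache_find() are hypothetical names.

/* Hypothetical caller of the list_bl API shown above (kernel context assumed). */
struct cache_entry {
        struct hlist_bl_node    node;
        unsigned long           key;
};

/* one bit-locked head per bucket; INIT_HLIST_BL_HEAD() each head before use */
static struct hlist_bl_head table[64];

static void cache_insert(struct cache_entry *e, unsigned int hash)
{
        struct hlist_bl_head *head = &table[hash & 63];

        hlist_bl_lock(head);            /* sets the lock bit in head->first */
        hlist_bl_add_head(&e->node, head);
        hlist_bl_unlock(head);
}

static struct cache_entry *cache_find(unsigned long key, unsigned int hash)
{
        struct hlist_bl_head *head = &table[hash & 63];
        struct hlist_bl_node *pos;
        struct cache_entry *e, *ret = NULL;

        hlist_bl_lock(head);
        hlist_bl_for_each_entry(e, pos, head, node)
                if (e->key == key) {
                        ret = e;
                        break;
                }
        hlist_bl_unlock(head);
        return ret;
}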
// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
*/ #include "bcachefs.h" #include "alloc_background.h" #include "alloc_foreground.h" #include "async_objs.h" #include "btree_update.h" #include "buckets.h" #include "checksum.h" #include "clock.h" #include "compress.h" #include "data_update.h" #include "disk_groups.h" #include "ec.h" #include "enumerated_ref.h" #include "error.h" #include "io_read.h" #include "io_misc.h" #include "io_write.h" #include "reflink.h" #include "subvolume.h" #include "trace.h" #include <linux/moduleparam.h> #include <linux/random.h> #include <linux/sched/mm.h> #ifdef CONFIG_BCACHEFS_DEBUG static unsigned bch2_read_corrupt_ratio; module_param_named(read_corrupt_ratio, bch2_read_corrupt_ratio, uint, 0644); MODULE_PARM_DESC(read_corrupt_ratio, ""); #endif static bool bch2_poison_extents_on_checksum_error; module_param_named(poison_extents_on_checksum_error, bch2_poison_extents_on_checksum_error, bool, 0644); MODULE_PARM_DESC(poison_extents_on_checksum_error, "Extents with checksum errors are marked as poisoned - unsafe without read fua support"); #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT static bool bch2_target_congested(struct bch_fs *c, u16 target) { const struct bch_devs_mask *devs; unsigned d, nr = 0, total = 0; u64 now = local_clock(), last; s64 congested; struct bch_dev *ca; if (!target) return false; guard(rcu)(); devs = bch2_target_to_mask(c, target) ?: &c->rw_devs[BCH_DATA_user]; for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) { ca = rcu_dereference(c->devs[d]); if (!ca) continue; congested = atomic_read(&ca->congested); last = READ_ONCE(ca->congested_last); if (time_after64(now, last)) congested -= (now - last) >> 12; total += max(congested, 0LL); nr++; } return get_random_u32_below(nr * CONGESTED_MAX) < total; } #else static bool bch2_target_congested(struct bch_fs *c, u16 target) { return false; } #endif /* Cache promotion on read */ static const struct rhashtable_params bch_promote_params = { .head_offset = offsetof(struct promote_op, hash), .key_offset = offsetof(struct promote_op, pos), .key_len = sizeof(struct bpos), .automatic_shrinking = true, }; static inline bool have_io_error(struct bch_io_failures *failed) { return failed && failed->nr; } static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio) { EBUG_ON(rbio->split); return rbio->data_update ? 
container_of(rbio, struct data_update, rbio) : NULL; } static bool ptr_being_rewritten(struct bch_read_bio *orig, unsigned dev) { struct data_update *u = rbio_data_update(orig); if (!u) return false; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(u->k.k)); unsigned i = 0; bkey_for_each_ptr(ptrs, ptr) { if (ptr->dev == dev && u->data_opts.rewrite_ptrs & BIT(i)) return true; i++; } return false; } static inline int should_promote(struct bch_fs *c, struct bkey_s_c k, struct bpos pos, struct bch_io_opts opts, unsigned flags, struct bch_io_failures *failed) { if (!have_io_error(failed)) { BUG_ON(!opts.promote_target); if (!(flags & BCH_READ_may_promote)) return bch_err_throw(c, nopromote_may_not); if (bch2_bkey_has_target(c, k, opts.promote_target)) return bch_err_throw(c, nopromote_already_promoted); if (bkey_extent_is_unwritten(k)) return bch_err_throw(c, nopromote_unwritten); if (bch2_target_congested(c, opts.promote_target)) return bch_err_throw(c, nopromote_congested); } if (rhashtable_lookup_fast(&c->promote_table, &pos, bch_promote_params)) return bch_err_throw(c, nopromote_in_flight); return 0; } static noinline void promote_free(struct bch_read_bio *rbio) { struct promote_op *op = container_of(rbio, struct promote_op, write.rbio); struct bch_fs *c = rbio->c; int ret = rhashtable_remove_fast(&c->promote_table, &op->hash, bch_promote_params); BUG_ON(ret); async_object_list_del(c, promote, op->list_idx); async_object_list_del(c, rbio, rbio->list_idx); bch2_data_update_exit(&op->write); enumerated_ref_put(&c->writes, BCH_WRITE_REF_promote); kfree_rcu(op, rcu); } static void promote_done(struct bch_write_op *wop) { struct promote_op *op = container_of(wop, struct promote_op, write.op); struct bch_fs *c = op->write.rbio.c; bch2_time_stats_update(&c->times[BCH_TIME_data_promote], op->start_time); promote_free(&op->write.rbio); } static void promote_start_work(struct work_struct *work) { struct promote_op *op = container_of(work, struct promote_op, work); bch2_data_update_read_done(&op->write); } static noinline void promote_start(struct bch_read_bio *rbio) { struct promote_op *op = container_of(rbio, struct promote_op, write.rbio); trace_and_count(op->write.op.c, io_read_promote, &rbio->bio); INIT_WORK(&op->work, promote_start_work); queue_work(rbio->c->write_ref_wq, &op->work); } static struct bch_read_bio *__promote_alloc(struct btree_trans *trans, enum btree_id btree_id, struct bkey_s_c k, struct bpos pos, struct extent_ptr_decoded *pick, unsigned sectors, struct bch_read_bio *orig, struct bch_io_failures *failed) { struct bch_fs *c = trans->c; int ret; struct data_update_opts update_opts = { .write_flags = BCH_WRITE_alloc_nowait }; if (!have_io_error(failed)) { update_opts.target = orig->opts.promote_target; update_opts.extra_replicas = 1; update_opts.write_flags |= BCH_WRITE_cached; update_opts.write_flags |= BCH_WRITE_only_specified_devs; } else { update_opts.target = orig->opts.foreground_target; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); unsigned ptr_bit = 1; bkey_for_each_ptr(ptrs, ptr) { if (bch2_dev_io_failures(failed, ptr->dev) && !ptr_being_rewritten(orig, ptr->dev)) update_opts.rewrite_ptrs |= ptr_bit; ptr_bit <<= 1; } if (!update_opts.rewrite_ptrs) return NULL; } if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_promote)) return ERR_PTR(-BCH_ERR_nopromote_no_writes); struct promote_op *op = kzalloc(sizeof(*op), GFP_KERNEL); if (!op) { ret = bch_err_throw(c, nopromote_enomem); goto err_put; } op->start_time = local_clock(); op->pos = pos; if 
(rhashtable_lookup_insert_fast(&c->promote_table, &op->hash, bch_promote_params)) { ret = bch_err_throw(c, nopromote_in_flight); goto err; } ret = async_object_list_add(c, promote, op, &op->list_idx); if (ret < 0) goto err_remove_hash; ret = bch2_data_update_init(trans, NULL, NULL, &op->write, writepoint_hashed((unsigned long) current), &orig->opts, update_opts, btree_id, k); op->write.type = BCH_DATA_UPDATE_promote; /* * possible errors: -BCH_ERR_nocow_lock_blocked, * -BCH_ERR_ENOSPC_disk_reservation: */ if (ret) goto err_remove_list; rbio_init_fragment(&op->write.rbio.bio, orig); op->write.rbio.bounce = true; op->write.rbio.promote = true; op->write.op.end_io = promote_done; return &op->write.rbio; err_remove_list: async_object_list_del(c, promote, op->list_idx); err_remove_hash: BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash, bch_promote_params)); err: bio_free_pages(&op->write.op.wbio.bio); /* We may have added to the rhashtable and thus need rcu freeing: */ kfree_rcu(op, rcu); err_put: enumerated_ref_put(&c->writes, BCH_WRITE_REF_promote); return ERR_PTR(ret); } noinline static struct bch_read_bio *promote_alloc(struct btree_trans *trans, struct bvec_iter iter, struct bkey_s_c k, struct extent_ptr_decoded *pick, unsigned flags, struct bch_read_bio *orig, bool *bounce, bool *read_full, struct bch_io_failures *failed) { /* * We're in the retry path, but we don't know what to repair yet, and we * don't want to do a promote here: */ if (failed && !failed->nr) return NULL; struct bch_fs *c = trans->c; /* * if failed != NULL we're not actually doing a promote, we're * recovering from an io/checksum error */ bool promote_full = (have_io_error(failed) || *read_full || READ_ONCE(c->opts.promote_whole_extents)); /* data might have to be decompressed in the write path: */ unsigned sectors = promote_full ? max(pick->crc.compressed_size, pick->crc.live_size) : bvec_iter_sectors(iter); struct bpos pos = promote_full ? bkey_start_pos(k.k) : POS(k.k->p.inode, iter.bi_sector); int ret; ret = should_promote(c, k, pos, orig->opts, flags, failed); if (ret) goto nopromote; struct bch_read_bio *promote = __promote_alloc(trans, k.k->type == KEY_TYPE_reflink_v ? 
BTREE_ID_reflink : BTREE_ID_extents, k, pos, pick, sectors, orig, failed); if (!promote) return NULL; ret = PTR_ERR_OR_ZERO(promote); if (ret) goto nopromote; *bounce = true; *read_full = promote_full; if (have_io_error(failed)) orig->self_healing = true; return promote; nopromote: trace_io_read_nopromote(c, ret); return NULL; } void bch2_promote_op_to_text(struct printbuf *out, struct promote_op *op) { if (!op->write.read_done) { prt_printf(out, "parent read: %px\n", op->write.rbio.parent); printbuf_indent_add(out, 2); bch2_read_bio_to_text(out, op->write.rbio.parent); printbuf_indent_sub(out, 2); } bch2_data_update_to_text(out, &op->write); } /* Read */ static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out, struct bch_read_bio *rbio, struct bpos read_pos) { int ret = lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, out, (subvol_inum) { rbio->subvol, read_pos.inode }, read_pos.offset << 9)); if (ret) return ret; if (rbio->data_update) prt_str(out, "(internal move) "); return 0; } static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out, struct bch_read_bio *rbio, struct bpos read_pos) { bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos)); } enum rbio_context { RBIO_CONTEXT_NULL, RBIO_CONTEXT_HIGHPRI, RBIO_CONTEXT_UNBOUND, }; static inline struct bch_read_bio * bch2_rbio_parent(struct bch_read_bio *rbio) { return rbio->split ? rbio->parent : rbio; } __always_inline static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn, enum rbio_context context, struct workqueue_struct *wq) { if (context <= rbio->context) { fn(&rbio->work); } else { rbio->work.func = fn; rbio->context = context; queue_work(wq, &rbio->work); } } static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio) { BUG_ON(rbio->bounce && !rbio->split); if (rbio->have_ioref) { struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev); enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read); } if (rbio->split) { struct bch_read_bio *parent = rbio->parent; if (unlikely(rbio->promote)) { if (!rbio->bio.bi_status) promote_start(rbio); else promote_free(rbio); } else { async_object_list_del(rbio->c, rbio, rbio->list_idx); if (rbio->bounce) bch2_bio_free_pages_pool(rbio->c, &rbio->bio); bio_put(&rbio->bio); } rbio = parent; } return rbio; } /* * Only called on a top level bch_read_bio to complete an entire read request, * not a split: */ static void bch2_rbio_done(struct bch_read_bio *rbio) { if (rbio->start_time) bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read], rbio->start_time); #ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS if (rbio->list_idx) async_object_list_del(rbio->c, rbio, rbio->list_idx); #endif bio_endio(&rbio->bio); } static void get_rbio_extent(struct btree_trans *trans, struct bch_read_bio *rbio, struct bkey_buf *sk) { struct btree_iter iter; struct bkey_s_c k; int ret = lockrestart_do(trans, bkey_err(k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos, 0))); if (ret) return; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); bkey_for_each_ptr(ptrs, ptr) if (bch2_extent_ptr_eq(*ptr, rbio->pick.ptr)) { bch2_bkey_buf_reassemble(sk, trans->c, k); break; } bch2_trans_iter_exit(trans, &iter); } static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_read_bio *rbio, enum btree_id btree, struct bkey_s_c read_k) { if (!bch2_poison_extents_on_checksum_error) return 0; struct bch_fs *c = trans->c; struct data_update *u = rbio_data_update(rbio); if (u) read_k = 
bkey_i_to_s_c(u->k.k); u64 flags = bch2_bkey_extent_flags(read_k); if (flags & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) return 0; struct btree_iter iter; struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, btree, bkey_start_pos(read_k.k), BTREE_ITER_intent); int ret = bkey_err(k); if (ret) return ret; if (!bkey_and_val_eq(k, read_k)) goto out; struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_extent_flags)); ret = PTR_ERR_OR_ZERO(new) ?: (bkey_reassemble(new, k), 0) ?: bch2_bkey_extent_flags_set(c, new, flags|BIT_ULL(BCH_EXTENT_FLAG_poisoned)) ?: bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node) ?: bch2_trans_commit(trans, NULL, NULL, 0); /* * Propagate key change back to data update path, in particular so it * knows the extent has been poisoned and it's safe to change the * checksum */ if (u && !ret) bch2_bkey_buf_copy(&u->k, c, new); out: bch2_trans_iter_exit(trans, &iter); return ret; } static noinline int bch2_read_retry_nodecode(struct btree_trans *trans, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, struct bch_io_failures *failed, unsigned flags) { struct data_update *u = container_of(rbio, struct data_update, rbio); retry: bch2_trans_begin(trans); struct btree_iter iter; struct bkey_s_c k; int ret = lockrestart_do(trans, bkey_err(k = bch2_bkey_get_iter(trans, &iter, u->btree_id, bkey_start_pos(&u->k.k->k), 0))); if (ret) goto err; if (!bkey_and_val_eq(k, bkey_i_to_s_c(u->k.k))) { /* extent we wanted to read no longer exists: */ rbio->ret = bch_err_throw(trans->c, data_read_key_overwritten); goto err; } ret = __bch2_read_extent(trans, rbio, bvec_iter, bkey_start_pos(&u->k.k->k), u->btree_id, bkey_i_to_s_c(u->k.k), 0, failed, flags, -1); err: bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || bch2_err_matches(ret, BCH_ERR_data_read_retry)) goto retry; if (ret) { rbio->bio.bi_status = BLK_STS_IOERR; rbio->ret = ret; } BUG_ON(atomic_read(&rbio->bio.__bi_remaining) != 1); return ret; } static void bch2_rbio_retry(struct work_struct *work) { struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; struct bvec_iter iter = rbio->bvec_iter; unsigned flags = rbio->flags; subvol_inum inum = { .subvol = rbio->subvol, .inum = rbio->read_pos.inode, }; struct bch_io_failures failed = { .nr = 0 }; struct btree_trans *trans = bch2_trans_get(c); struct bkey_buf sk; bch2_bkey_buf_init(&sk); bkey_init(&sk.k->k); trace_io_read_retry(&rbio->bio); this_cpu_add(c->counters[BCH_COUNTER_io_read_retry], bvec_iter_sectors(rbio->bvec_iter)); get_rbio_extent(trans, rbio, &sk); if (!bkey_deleted(&sk.k->k) && bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid)) bch2_mark_io_failure(&failed, &rbio->pick, rbio->ret == -BCH_ERR_data_read_retry_csum_err); if (!rbio->split) { rbio->bio.bi_status = 0; rbio->ret = 0; } unsigned subvol = rbio->subvol; struct bpos read_pos = rbio->read_pos; rbio = bch2_rbio_free(rbio); flags |= BCH_READ_in_retry; flags &= ~BCH_READ_may_promote; flags &= ~BCH_READ_last_fragment; flags |= BCH_READ_must_clone; int ret = rbio->data_update ? 
bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags) : __bch2_read(trans, rbio, iter, inum, &failed, &sk, flags); if (ret) { rbio->ret = ret; rbio->bio.bi_status = BLK_STS_IOERR; } if (failed.nr || ret) { struct printbuf buf = PRINTBUF; bch2_log_msg_start(c, &buf); lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, (subvol_inum) { subvol, read_pos.inode }, read_pos.offset << 9)); if (rbio->data_update) prt_str(&buf, "(internal move) "); prt_str(&buf, "data read error, "); if (!ret) { prt_str(&buf, "successful retry"); if (rbio->self_healing) prt_str(&buf, ", self healing"); } else prt_str(&buf, bch2_err_str(ret)); prt_newline(&buf); if (!bkey_deleted(&sk.k->k)) { bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(sk.k)); prt_newline(&buf); } bch2_io_failures_to_text(&buf, c, &failed); bch2_print_str_ratelimited(c, KERN_ERR, buf.buf); printbuf_exit(&buf); } bch2_rbio_done(rbio); bch2_bkey_buf_exit(&sk, c); bch2_trans_put(trans); } static void bch2_rbio_error(struct bch_read_bio *rbio, int ret, blk_status_t blk_error) { BUG_ON(ret >= 0); rbio->ret = ret; rbio->bio.bi_status = blk_error; bch2_rbio_parent(rbio)->saw_error = true; if (rbio->flags & BCH_READ_in_retry) return; if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) { bch2_rbio_punt(rbio, bch2_rbio_retry, RBIO_CONTEXT_UNBOUND, system_unbound_wq); } else { rbio = bch2_rbio_free(rbio); rbio->ret = ret; rbio->bio.bi_status = blk_error; bch2_rbio_done(rbio); } } static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, struct bch_read_bio *rbio) { struct bch_fs *c = rbio->c; u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset; struct bch_extent_crc_unpacked new_crc; struct btree_iter iter; struct bkey_i *new; struct bkey_s_c k; int ret = 0; if (crc_is_compressed(rbio->pick.crc)) return 0; k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos, BTREE_ITER_slots|BTREE_ITER_intent); if ((ret = bkey_err(k))) goto out; if (bversion_cmp(k.k->bversion, rbio->version) || !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset)) goto out; /* Extent was merged? */ if (bkey_start_offset(k.k) < data_offset || k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size) goto out; if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version, rbio->pick.crc, NULL, &new_crc, bkey_start_offset(k.k) - data_offset, k.k->size, rbio->pick.crc.csum_type)) { bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)"); ret = 0; goto out; } /* * going to be temporarily appending another checksum entry: */ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_extent_crc128)); if ((ret = PTR_ERR_OR_ZERO(new))) goto out; bkey_reassemble(new, k); if (!bch2_bkey_narrow_crcs(new, new_crc)) goto out; ret = bch2_trans_update(trans, &iter, new, BTREE_UPDATE_internal_snapshot_node); out: bch2_trans_iter_exit(trans, &iter); return ret; } static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) { bch2_trans_commit_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, __bch2_rbio_narrow_crcs(trans, rbio)); } static void bch2_read_decompress_err(struct work_struct *work) { struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; struct printbuf buf = PRINTBUF; bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); prt_str(&buf, "decompression error"); struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; if (ca) bch_err_ratelimited(ca, "%s", buf.buf); else bch_err_ratelimited(c, "%s", buf.buf); bch2_rbio_error(rbio, -BCH_ERR_data_read_decompress_err, BLK_STS_IOERR); printbuf_exit(&buf); } static void bch2_read_decrypt_err(struct work_struct *work) { struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; struct printbuf buf = PRINTBUF; bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); prt_str(&buf, "decrypt error"); struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; if (ca) bch_err_ratelimited(ca, "%s", buf.buf); else bch_err_ratelimited(c, "%s", buf.buf); bch2_rbio_error(rbio, -BCH_ERR_data_read_decrypt_err, BLK_STS_IOERR); printbuf_exit(&buf); } /* Inner part that may run in process context */ static void __bch2_read_endio(struct work_struct *work) { struct bch_read_bio *rbio = container_of(work, struct bch_read_bio, work); struct bch_fs *c = rbio->c; struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; struct bch_read_bio *parent = bch2_rbio_parent(rbio); struct bio *src = &rbio->bio; struct bio *dst = &parent->bio; struct bvec_iter dst_iter = rbio->bvec_iter; struct bch_extent_crc_unpacked crc = rbio->pick.crc; struct nonce nonce = extent_nonce(rbio->version, crc); unsigned nofs_flags; struct bch_csum csum; int ret; nofs_flags = memalloc_nofs_save(); /* Reset iterator for checksumming and copying bounced data: */ if (rbio->bounce) { src->bi_iter.bi_size = crc.compressed_size << 9; src->bi_iter.bi_idx = 0; src->bi_iter.bi_bvec_done = 0; } else { src->bi_iter = rbio->bvec_iter; } bch2_maybe_corrupt_bio(src, bch2_read_corrupt_ratio); csum = bch2_checksum_bio(c, crc.csum_type, nonce, src); bool csum_good = !bch2_crc_cmp(csum, rbio->pick.crc.csum) || c->opts.no_data_io; /* * Checksum error: if the bio wasn't bounced, we may have been * reading into buffers owned by userspace (that userspace can * scribble over) - retry the read, bouncing it this time: */ if (!csum_good && !rbio->bounce && (rbio->flags & BCH_READ_user_mapped)) { rbio->flags |= BCH_READ_must_bounce; bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err_maybe_userspace, BLK_STS_IOERR); goto out; } bch2_account_io_completion(ca, BCH_MEMBER_ERROR_checksum, 0, csum_good); if (!csum_good) goto csum_err; /* * XXX * We need to rework the narrow_crcs path to deliver the read completion * first, and then punt to a different workqueue, otherwise we're * holding up reads while doing btree updates which is bad for memory * reclaim. 
*/ if (unlikely(rbio->narrow_crcs)) bch2_rbio_narrow_crcs(rbio); if (likely(!parent->data_update)) { /* Adjust crc to point to subset of data we want: */ crc.offset += rbio->offset_into_extent; crc.live_size = bvec_iter_sectors(rbio->bvec_iter); if (crc_is_compressed(crc)) { ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src); if (ret) goto decrypt_err; if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) && !c->opts.no_data_io) goto decompression_err; } else { /* don't need to decrypt the entire bio: */ nonce = nonce_add(nonce, crc.offset << 9); bio_advance(src, crc.offset << 9); BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size); src->bi_iter.bi_size = dst_iter.bi_size; ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src); if (ret) goto decrypt_err; if (rbio->bounce) { struct bvec_iter src_iter = src->bi_iter; bio_copy_data_iter(dst, &dst_iter, src, &src_iter); } } } else { if (rbio->split) rbio->parent->pick = rbio->pick; if (rbio->bounce) { struct bvec_iter src_iter = src->bi_iter; bio_copy_data_iter(dst, &dst_iter, src, &src_iter); } } if (rbio->promote) { /* * Re encrypt data we decrypted, so it's consistent with * rbio->crc: */ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src); if (ret) goto decrypt_err; } if (likely(!(rbio->flags & BCH_READ_in_retry))) { rbio = bch2_rbio_free(rbio); bch2_rbio_done(rbio); } out: memalloc_nofs_restore(nofs_flags); return; csum_err: bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR); goto out; decompression_err: bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; decrypt_err: bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; } static void bch2_read_endio(struct bio *bio) { struct bch_read_bio *rbio = container_of(bio, struct bch_read_bio, bio); struct bch_fs *c = rbio->c; struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; struct workqueue_struct *wq = NULL; enum rbio_context context = RBIO_CONTEXT_NULL; bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read, rbio->submit_time, !bio->bi_status); if (!rbio->split) rbio->bio.bi_end_io = rbio->end_io; if (unlikely(bio->bi_status)) { bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_io_err, bio->bi_status); return; } if (((rbio->flags & BCH_READ_retry_if_stale) && race_fault()) || (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) { trace_and_count(c, io_read_reuse_race, &rbio->bio); if (rbio->flags & BCH_READ_retry_if_stale) bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_retry, BLK_STS_AGAIN); else bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_race, BLK_STS_AGAIN); return; } if (rbio->narrow_crcs || rbio->promote || crc_is_compressed(rbio->pick.crc) || bch2_csum_type_is_encryption(rbio->pick.crc.csum_type)) context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq; else if (rbio->pick.crc.csum_type) context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq; bch2_rbio_punt(rbio, __bch2_read_endio, context, wq); } static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c k, struct bch_extent_ptr ptr) { struct bch_fs *c = trans->c; struct btree_iter iter; struct printbuf buf = PRINTBUF; int ret; bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, PTR_BUCKET_POS(ca, &ptr), BTREE_ITER_cached); int gen = bucket_gen_get(ca, iter.pos.offset); if (gen >= 0) { prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); printbuf_indent_add(&buf, 2); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); prt_printf(&buf, "memory gen: %u", gen); ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter))); if (!ret) { prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, k); } } else { prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n", iter.pos.inode, iter.pos.offset); printbuf_indent_add(&buf, 2); prt_printf(&buf, "first bucket %u nbuckets %llu\n", ca->mi.first_bucket, ca->mi.nbuckets); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); } bch2_fs_inconsistent(c, "%s", buf.buf); bch2_trans_iter_exit(trans, &iter); printbuf_exit(&buf); } int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, struct bvec_iter iter, struct bpos read_pos, enum btree_id data_btree, struct bkey_s_c k, unsigned offset_into_extent, struct bch_io_failures *failed, unsigned flags, int dev) { struct bch_fs *c = trans->c; struct extent_ptr_decoded pick; struct bch_read_bio *rbio = NULL; bool bounce = false, read_full = false, narrow_crcs = false; struct bpos data_pos = bkey_start_pos(k.k); struct data_update *u = rbio_data_update(orig); int ret = 0; if (bkey_extent_is_inline_data(k.k)) { unsigned bytes = min_t(unsigned, iter.bi_size, bkey_inline_data_bytes(k.k)); swap(iter.bi_size, bytes); memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k)); swap(iter.bi_size, bytes); bio_advance_iter(&orig->bio, &iter, bytes); zero_fill_bio_iter(&orig->bio, iter); this_cpu_add(c->counters[BCH_COUNTER_io_read_inline], bvec_iter_sectors(iter)); goto out_read_done; } if ((bch2_bkey_extent_flags(k) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) && !orig->data_update) return bch_err_throw(c, extent_poisoned); retry_pick: ret = bch2_bkey_pick_read_device(c, k, failed, &pick, dev); /* hole or reservation - just zero fill: */ if (!ret) goto hole; if (unlikely(ret < 0)) { if (ret == -BCH_ERR_data_read_csum_err) { int ret2 = maybe_poison_extent(trans, orig, data_btree, 
k); if (ret2) { ret = ret2; goto err; } trace_and_count(c, io_read_fail_and_poison, &orig->bio); } struct printbuf buf = PRINTBUF; bch2_read_err_msg_trans(trans, &buf, orig, read_pos); prt_printf(&buf, "%s\n ", bch2_err_str(ret)); bch2_bkey_val_to_text(&buf, c, k); bch_err_ratelimited(c, "%s", buf.buf); printbuf_exit(&buf); goto err; } if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20_key_set) { struct printbuf buf = PRINTBUF; bch2_read_err_msg_trans(trans, &buf, orig, read_pos); prt_printf(&buf, "attempting to read encrypted data without encryption key\n "); bch2_bkey_val_to_text(&buf, c, k); bch_err_ratelimited(c, "%s", buf.buf); printbuf_exit(&buf); ret = bch_err_throw(c, data_read_no_encryption_key); goto err; } struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_io_read); /* * Stale dirty pointers are treated as IO errors, but @failed isn't * allocated unless we're in the retry path - so if we're not in the * retry path, don't check here, it'll be caught in bch2_read_endio() * and we'll end up in the retry path: */ if ((flags & BCH_READ_in_retry) && !pick.ptr.cached && ca && unlikely(dev_ptr_stale(ca, &pick.ptr))) { read_from_stale_dirty_pointer(trans, ca, k, pick.ptr); bch2_mark_io_failure(failed, &pick, false); enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read); goto retry_pick; } if (likely(!u)) { if (!(flags & BCH_READ_last_fragment) || bio_flagged(&orig->bio, BIO_CHAIN)) flags |= BCH_READ_must_clone; narrow_crcs = !(flags & BCH_READ_in_retry) && bch2_can_narrow_extent_crcs(k, pick.crc); if (narrow_crcs && (flags & BCH_READ_user_mapped)) flags |= BCH_READ_must_bounce; EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size); if (crc_is_compressed(pick.crc) || (pick.crc.csum_type != BCH_CSUM_none && (bvec_iter_sectors(iter) != pick.crc.uncompressed_size || (bch2_csum_type_is_encryption(pick.crc.csum_type) && (flags & BCH_READ_user_mapped)) || (flags & BCH_READ_must_bounce)))) { read_full = true; bounce = true; } } else { /* * can happen if we retry, and the extent we were going to read * has been merged in the meantime: */ if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) { if (ca) enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read); rbio->ret = bch_err_throw(c, data_read_buffer_too_small); goto out_read_done; } iter.bi_size = pick.crc.compressed_size << 9; read_full = true; } if (orig->opts.promote_target || have_io_error(failed)) rbio = promote_alloc(trans, iter, k, &pick, flags, orig, &bounce, &read_full, failed); if (!read_full) { EBUG_ON(crc_is_compressed(pick.crc)); EBUG_ON(pick.crc.csum_type && (bvec_iter_sectors(iter) != pick.crc.uncompressed_size || bvec_iter_sectors(iter) != pick.crc.live_size || pick.crc.offset || offset_into_extent)); data_pos.offset += offset_into_extent; pick.ptr.offset += pick.crc.offset + offset_into_extent; offset_into_extent = 0; pick.crc.compressed_size = bvec_iter_sectors(iter); pick.crc.uncompressed_size = bvec_iter_sectors(iter); pick.crc.offset = 0; pick.crc.live_size = bvec_iter_sectors(iter); } if (rbio) { /* * promote already allocated bounce rbio: * promote needs to allocate a bio big enough for uncompressing * data in the write path, but we're not going to use it all * here: */ EBUG_ON(rbio->bio.bi_iter.bi_size < pick.crc.compressed_size << 9); rbio->bio.bi_iter.bi_size = pick.crc.compressed_size << 9; } else if (bounce) { unsigned sectors = pick.crc.compressed_size; rbio = rbio_init_fragment(bio_alloc_bioset(NULL, 
DIV_ROUND_UP(sectors, PAGE_SECTORS), 0, GFP_NOFS, &c->bio_read_split), orig); bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9); rbio->bounce = true; } else if (flags & BCH_READ_must_clone) { /* * Have to clone if there were any splits, due to error * reporting issues (if a split errored, and retrying didn't * work, when it reports the error to its parent (us) we don't * know if the error was from our bio, and we should retry, or * from the whole bio, in which case we don't want to retry and * lose the error) */ rbio = rbio_init_fragment(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS, &c->bio_read_split), orig); rbio->bio.bi_iter = iter; } else { rbio = orig; rbio->bio.bi_iter = iter; EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN)); } EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size); rbio->submit_time = local_clock(); if (!rbio->split) rbio->end_io = orig->bio.bi_end_io; rbio->bvec_iter = iter; rbio->offset_into_extent= offset_into_extent; rbio->flags = flags; rbio->have_ioref = ca != NULL; rbio->narrow_crcs = narrow_crcs; rbio->ret = 0; rbio->context = 0; rbio->pick = pick; rbio->subvol = orig->subvol; rbio->read_pos = read_pos; rbio->data_btree = data_btree; rbio->data_pos = data_pos; rbio->version = k.k->bversion; INIT_WORK(&rbio->work, NULL); rbio->bio.bi_opf = orig->bio.bi_opf; rbio->bio.bi_iter.bi_sector = pick.ptr.offset; rbio->bio.bi_end_io = bch2_read_endio; async_object_list_add(c, rbio, rbio, &rbio->list_idx); if (rbio->bounce) trace_and_count(c, io_read_bounce, &rbio->bio); if (!u) this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio)); else this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio)); bch2_increment_clock(c, bio_sectors(&rbio->bio), READ); /* * If it's being moved internally, we don't want to flag it as a cache * hit: */ if (ca && pick.ptr.cached && !u) bch2_bucket_io_time_reset(trans, pick.ptr.dev, PTR_BUCKET_NR(ca, &pick.ptr), READ); if (!(flags & (BCH_READ_in_retry|BCH_READ_last_fragment))) { bio_inc_remaining(&orig->bio); trace_and_count(c, io_read_split, &orig->bio); } /* * Unlock the iterator while the btree node's lock is still in * cache, before doing the IO: */ if (!(flags & BCH_READ_in_retry)) bch2_trans_unlock(trans); else bch2_trans_unlock_long(trans); if (likely(!rbio->pick.do_ec_reconstruct)) { if (unlikely(!rbio->have_ioref)) { bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_device_offline, BLK_STS_IOERR); goto out; } this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user], bio_sectors(&rbio->bio)); bio_set_dev(&rbio->bio, ca->disk_sb.bdev); if (unlikely(c->opts.no_data_io)) { if (likely(!(flags & BCH_READ_in_retry))) bio_endio(&rbio->bio); } else { if (likely(!(flags & BCH_READ_in_retry))) submit_bio(&rbio->bio); else submit_bio_wait(&rbio->bio); } /* * We just submitted IO which may block, we expect relock fail * events and shouldn't count them: */ trans->notrace_relock_fail = true; } else { /* Attempting reconstruct read: */ if (bch2_ec_read_extent(trans, rbio, k)) { bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_ec_reconstruct_err, BLK_STS_IOERR); goto out; } if (likely(!(flags & BCH_READ_in_retry))) bio_endio(&rbio->bio); } out: if (likely(!(flags & BCH_READ_in_retry))) { return 0; } else { bch2_trans_unlock(trans); int ret; rbio->context = RBIO_CONTEXT_UNBOUND; bch2_read_endio(&rbio->bio); ret = rbio->ret; rbio = bch2_rbio_free(rbio); if (bch2_err_matches(ret, BCH_ERR_data_read_retry_avoid)) bch2_mark_io_failure(failed, &pick, ret == -BCH_ERR_data_read_retry_csum_err); return ret; } err: if (flags & 
BCH_READ_in_retry) return ret; orig->bio.bi_status = BLK_STS_IOERR; orig->ret = ret; goto out_read_done; hole: this_cpu_add(c->counters[BCH_COUNTER_io_read_hole], bvec_iter_sectors(iter)); /* * won't normally happen in the data update (bch2_move_extent()) path, * but if we retry and the extent we wanted to read no longer exists we * have to signal that: */ if (u) orig->ret = bch_err_throw(c, data_read_key_overwritten); zero_fill_bio_iter(&orig->bio, iter); out_read_done: if ((flags & BCH_READ_last_fragment) && !(flags & BCH_READ_in_retry)) bch2_rbio_done(orig); return 0; } int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio, struct bvec_iter bvec_iter, subvol_inum inum, struct bch_io_failures *failed, struct bkey_buf *prev_read, unsigned flags) { struct bch_fs *c = trans->c; struct btree_iter iter; struct bkey_buf sk; struct bkey_s_c k; enum btree_id data_btree; int ret; EBUG_ON(rbio->data_update); bch2_bkey_buf_init(&sk); bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum.inum, bvec_iter.bi_sector), BTREE_ITER_slots); while (1) { data_btree = BTREE_ID_extents; bch2_trans_begin(trans); u32 snapshot; ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); if (ret) goto err; bch2_btree_iter_set_snapshot(trans, &iter, snapshot); bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, bvec_iter.bi_sector)); k = bch2_btree_iter_peek_slot(trans, &iter); ret = bkey_err(k); if (ret) goto err; s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); unsigned sectors = k.k->size - offset_into_extent; bch2_bkey_buf_reassemble(&sk, c, k); ret = bch2_read_indirect_extent(trans, &data_btree, &offset_into_extent, &sk); if (ret) goto err; k = bkey_i_to_s_c(sk.k); if (unlikely(flags & BCH_READ_in_retry)) { if (!bkey_and_val_eq(k, bkey_i_to_s_c(prev_read->k))) failed->nr = 0; bch2_bkey_buf_copy(prev_read, c, sk.k); } /* * With indirect extents, the amount of data to read is the min * of the original extent and the indirect extent: */ sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); unsigned bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9; swap(bvec_iter.bi_size, bytes); if (bvec_iter.bi_size == bytes) flags |= BCH_READ_last_fragment; ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos, data_btree, k, offset_into_extent, failed, flags, -1); swap(bvec_iter.bi_size, bytes); if (ret) goto err; if (flags & BCH_READ_last_fragment) break; bio_advance_iter(&rbio->bio, &bvec_iter, bytes); err: if (ret == -BCH_ERR_data_read_retry_csum_err_maybe_userspace) flags |= BCH_READ_must_bounce; if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart) && !bch2_err_matches(ret, BCH_ERR_data_read_retry)) break; } if (unlikely(ret)) { if (ret != -BCH_ERR_extent_poisoned) { struct printbuf buf = PRINTBUF; lockrestart_do(trans, bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9)); prt_printf(&buf, "data read error: %s", bch2_err_str(ret)); bch_err_ratelimited(c, "%s", buf.buf); printbuf_exit(&buf); } rbio->bio.bi_status = BLK_STS_IOERR; rbio->ret = ret; if (!(flags & BCH_READ_in_retry)) bch2_rbio_done(rbio); } bch2_trans_iter_exit(trans, &iter); bch2_bkey_buf_exit(&sk, c); return ret; } static const char * const bch2_read_bio_flags[] = { #define x(n) #n, BCH_READ_FLAGS() #undef x NULL }; void bch2_read_bio_to_text(struct printbuf *out, struct bch_read_bio *rbio) { u64 now = local_clock(); prt_printf(out, "start_time:\t%llu\n", rbio->start_time ? 
now - rbio->start_time : 0); prt_printf(out, "submit_time:\t%llu\n", rbio->submit_time ? now - rbio->submit_time : 0); if (!rbio->split) prt_printf(out, "end_io:\t%ps\n", rbio->end_io); else prt_printf(out, "parent:\t |