Total coverage: 269070 (18%) of 1563940
/*
 * Copyright (c) 2006-2008 Intel Corporation
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 *
 * DRM core CRTC related functions
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 *
 * Authors:
 *	Keith Packard
 *	Eric Anholt <eric@anholt.net>
 *	Dave Airlie <airlied@linux.ie>
 *	Jesse Barnes <jesse.barnes@intel.com>
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/dynamic_debug.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "drm_crtc_helper_internal.h"

DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"DRM_UT_CORE",
			"DRM_UT_DRIVER",
			"DRM_UT_KMS",
			"DRM_UT_PRIME",
			"DRM_UT_ATOMIC",
			"DRM_UT_VBL",
			"DRM_UT_STATE",
			"DRM_UT_LEASE",
			"DRM_UT_DP",
			"DRM_UT_DRMRES");

/**
 * DOC: overview
 *
 * The CRTC modeset helper library provides a default set_config implementation
 * in drm_crtc_helper_set_config(). Plus a few other convenience functions using
 * the same callbacks which drivers can use to e.g. restore the modeset
 * configuration on resume with drm_helper_resume_force_mode().
 *
 * Note that this helper library doesn't track the current power state of CRTCs
 * and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even
 * though the hardware is already in the desired state. This deficiency has been
 * fixed in the atomic helpers.
* * The driver callbacks are mostly compatible with the atomic modeset helpers, * except for the handling of the primary plane: Atomic helpers require that the * primary plane is implemented as a real standalone plane and not directly tied * to the CRTC state. For easier transition this library provides functions to * implement the old semantics required by the CRTC helpers using the new plane * and atomic helper callbacks. * * Drivers are strongly urged to convert to the atomic helpers (by way of first * converting to the plane helpers). New drivers must not use these functions * but need to implement the atomic interface instead, potentially using the * atomic helpers for that. * * These legacy modeset helpers use the same function table structures as * all other modesetting helpers. See the documentation for struct * &drm_crtc_helper_funcs, &struct drm_encoder_helper_funcs and struct * &drm_connector_helper_funcs. */ /** * drm_helper_encoder_in_use - check if a given encoder is in use * @encoder: encoder to check * * Checks whether @encoder is with the current mode setting output configuration * in use by any connector. This doesn't mean that it is actually enabled since * the DPMS state is tracked separately. * * Returns: * True if @encoder is used, false otherwise. */ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. */ if (!oops_in_progress) { drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); drm_WARN_ON(dev, !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); } drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder == encoder) { drm_connector_list_iter_end(&conn_iter); return true; } } drm_connector_list_iter_end(&conn_iter); return false; } EXPORT_SYMBOL(drm_helper_encoder_in_use); /** * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config * @crtc: CRTC to check * * Checks whether @crtc is with the current mode setting output configuration * in use by any connector. This doesn't mean that it is actually enabled since * the DPMS state is tracked separately. * * Returns: * True if @crtc is used, false otherwise. */ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) { struct drm_encoder *encoder; struct drm_device *dev = crtc->dev; drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. 
*/ if (!oops_in_progress) drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex)); drm_for_each_encoder(encoder, dev) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; return false; } EXPORT_SYMBOL(drm_helper_crtc_in_use); static void drm_encoder_disable(struct drm_encoder *encoder) { const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (!encoder_funcs) return; if (encoder_funcs->disable) (*encoder_funcs->disable)(encoder); else if (encoder_funcs->dpms) (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); } static void __drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_crtc *crtc; drm_warn_on_modeset_not_all_locked(dev); drm_for_each_encoder(encoder, dev) { if (!drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnect encoder from any connector */ encoder->crtc = NULL; } } drm_for_each_crtc(crtc, dev) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { if (crtc_funcs->disable) (*crtc_funcs->disable)(crtc); else (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); crtc->primary->fb = NULL; } } } /** * drm_helper_disable_unused_functions - disable unused objects * @dev: DRM device * * This function walks through the entire mode setting configuration of @dev. It * will remove any CRTC links of unused encoders and encoder links of * disconnected connectors. Then it will disable all unused encoders and CRTCs * either by calling their disable callback if available or by calling their * dpms callback with DRM_MODE_DPMS_OFF. * * NOTE: * * This function is part of the legacy modeset helper library and will cause * major confusion with atomic drivers. This is because atomic helpers guarantee * to never call ->disable() hooks on a disabled function, or ->enable() hooks * on an enabled functions. drm_helper_disable_unused_functions() on the other * hand throws such guarantees into the wind and calls disable hooks * unconditionally on unused functions. */ void drm_helper_disable_unused_functions(struct drm_device *dev) { drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_helper_disable_unused_functions); /* * Check the CRTC we're going to map each output to vs. its current * CRTC. If they don't match, we have to disable the output and the CRTC * since the driver will have to re-route things. */ static void drm_crtc_prepare_encoders(struct drm_device *dev) { const struct drm_encoder_helper_funcs *encoder_funcs; struct drm_encoder *encoder; drm_for_each_encoder(encoder, dev) { encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; /* Disable unused encoders */ if (encoder->crtc == NULL) drm_encoder_disable(encoder); } } /** * drm_crtc_helper_set_mode - internal helper to set a mode * @crtc: CRTC to program * @mode: mode to use * @x: horizontal offset into the surface * @y: vertical offset into the surface * @old_fb: old framebuffer, for cleanup * * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance * to fixup or reject the mode prior to trying to set it. This is an internal * helper that drivers could e.g. use to update properties that require the * entire output pipe to be disabled and re-enabled in a new configuration. 
For * example for changing whether audio is enabled on a hdmi link or for changing * panel fitter or dither attributes. It is also called by the * drm_crtc_helper_set_config() helper function to drive the mode setting * sequence. * * Returns: * True if the mode was set successfully, false otherwise. */ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; const struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; bool saved_enabled; struct drm_encoder *encoder; bool ret = true; drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_warn_on_modeset_not_all_locked(dev); saved_enabled = crtc->enabled; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) return true; adjusted_mode = drm_mode_duplicate(dev, mode); if (!adjusted_mode) { crtc->enabled = saved_enabled; return false; } drm_mode_init(&saved_mode, &crtc->mode); drm_mode_init(&saved_hwmode, &crtc->hwmode); saved_x = crtc->x; saved_y = crtc->y; /* Update crtc values up front so the driver can rely on them for mode * setting. */ drm_mode_copy(&crtc->mode, mode); crtc->x = x; crtc->y = y; /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; encoder_funcs = encoder->helper_private; if (encoder_funcs->mode_fixup) { if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { drm_dbg_kms(dev, "[ENCODER:%d:%s] mode fixup failed\n", encoder->base.id, encoder->name); goto done; } } } if (crtc_funcs->mode_fixup) { if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { drm_dbg_kms(dev, "[CRTC:%d:%s] mode fixup failed\n", crtc->base.id, crtc->name); goto done; } } drm_dbg_kms(dev, "[CRTC:%d:%s]\n", crtc->base.id, crtc->name); drm_mode_copy(&crtc->hwmode, adjusted_mode); /* Prepare the encoders and CRTCs before setting the mode. */ drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; /* Disable the encoders as the first thing we do. */ if (encoder_funcs->prepare) encoder_funcs->prepare(encoder); } drm_crtc_prepare_encoders(dev); crtc_funcs->prepare(crtc); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); if (!ret) goto done; drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; drm_dbg_kms(dev, "[ENCODER:%d:%s] set [MODE:%s]\n", encoder->base.id, encoder->name, mode->name); if (encoder_funcs->mode_set) encoder_funcs->mode_set(encoder, mode, adjusted_mode); } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ crtc_funcs->commit(crtc); drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; if (encoder_funcs->commit) encoder_funcs->commit(encoder); } /* Calculate and store various constants which * are later needed by vblank and swap-completion * timestamping. 
They are derived from true hwmode. */ drm_calc_timestamping_constants(crtc, &crtc->hwmode); /* FIXME: add subpixel order */ done: drm_mode_destroy(dev, adjusted_mode); if (!ret) { crtc->enabled = saved_enabled; drm_mode_copy(&crtc->mode, &saved_mode); drm_mode_copy(&crtc->hwmode, &saved_hwmode); crtc->x = saved_x; crtc->y = saved_y; } return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_mode); /** * drm_crtc_helper_atomic_check() - Helper to check CRTC atomic-state * @crtc: CRTC to check * @state: atomic state object * * Provides a default CRTC-state check handler for CRTCs that only have * one primary plane attached to it. This is often the case for the CRTC * of simple framebuffers. * * RETURNS: * Zero on success, or an errno code otherwise. */ int drm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state->enable) return 0; return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state); } EXPORT_SYMBOL(drm_crtc_helper_atomic_check); static void drm_crtc_helper_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_connector *connector; struct drm_encoder *encoder; /* Decouple all encoders and their attached connectors from this crtc */ drm_for_each_encoder(encoder, dev) { struct drm_connector_list_iter conn_iter; if (encoder->crtc != crtc) continue; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder != encoder) continue; connector->encoder = NULL; /* * drm_helper_disable_unused_functions() ought to be * doing this, but since we've decoupled the encoder * from the connector above, the required connection * between them is henceforth no longer available. */ connector->dpms = DRM_MODE_DPMS_OFF; /* we keep a reference while the encoder is bound */ drm_connector_put(connector); } drm_connector_list_iter_end(&conn_iter); } __drm_helper_disable_unused_functions(dev); } /* * For connectors that support multiple encoders, either the * .atomic_best_encoder() or .best_encoder() operation must be implemented. */ struct drm_encoder * drm_connector_get_single_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; drm_WARN_ON(connector->dev, hweight32(connector->possible_encoders) > 1); drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; } /** * drm_crtc_helper_set_config - set a new config from userspace * @set: mode set configuration * @ctx: lock acquire context, not used here * * The drm_crtc_helper_set_config() helper function implements the of * &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC * helpers. * * It first tries to locate the best encoder for each connector by calling the * connector @drm_connector_helper_funcs.best_encoder helper operation. * * After locating the appropriate encoders, the helper function will call the * mode_fixup encoder and CRTC helper operations to adjust the requested mode, * or reject it completely in which case an error will be returned to the * application. If the new configuration after mode adjustment is identical to * the current configuration the helper function will return without performing * any other operation. * * If the adjusted mode is identical to the current mode but changes to the * frame buffer need to be applied, the drm_crtc_helper_set_config() function * will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation. 
* * If the adjusted mode differs from the current mode, or if the * ->mode_set_base() helper operation is not provided, the helper function * performs a full mode set sequence by calling the ->prepare(), ->mode_set() * and ->commit() CRTC and encoder helper operations, in that order. * Alternatively it can also use the dpms and disable helper operations. For * details see &struct drm_crtc_helper_funcs and struct * &drm_encoder_helper_funcs. * * This function is deprecated. New drivers must implement atomic modeset * support, for which this function is unsuitable. Instead drivers should use * drm_atomic_helper_set_config(). * * Returns: * Returns 0 on success, negative errno numbers on failure. */ int drm_crtc_helper_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev; struct drm_crtc **save_encoder_crtcs, *new_crtc; struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *connector; struct drm_connector_list_iter conn_iter; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; int ret; int i; BUG_ON(!set); BUG_ON(!set->crtc); BUG_ON(!set->crtc->helper_private); /* Enforce sane interface api - has been abused by the fb helper. */ BUG_ON(!set->mode && set->fb); BUG_ON(set->fb && set->num_connectors == 0); crtc_funcs = set->crtc->helper_private; dev = set->crtc->dev; drm_dbg_kms(dev, "\n"); drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); if (!set->mode) set->fb = NULL; if (set->fb) { drm_dbg_kms(dev, "[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", set->crtc->base.id, set->crtc->name, set->fb->base.id, (int)set->num_connectors, set->x, set->y); } else { drm_dbg_kms(dev, "[CRTC:%d:%s] [NOFB]\n", set->crtc->base.id, set->crtc->name); drm_crtc_helper_disable(set->crtc); return 0; } drm_warn_on_modeset_not_all_locked(dev); /* * Allocate space for the backup of all (non-pointer) encoder and * connector data. */ save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder, sizeof(struct drm_crtc *), GFP_KERNEL); if (!save_encoder_crtcs) return -ENOMEM; save_connector_encoders = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_encoder *), GFP_KERNEL); if (!save_connector_encoders) { kfree(save_encoder_crtcs); return -ENOMEM; } /* * Copy data. Note that driver private data is not affected. * Should anything bad happen only the expected state is * restored, not the drivers personal bookkeeping. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { save_encoder_crtcs[count++] = encoder->crtc; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) save_connector_encoders[count++] = connector->encoder; drm_connector_list_iter_end(&conn_iter); save_set.crtc = set->crtc; save_set.mode = &set->crtc->mode; save_set.x = set->crtc->x; save_set.y = set->crtc->y; save_set.fb = set->crtc->primary->fb; /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ if (set->crtc->primary->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->primary->fb == NULL) { drm_dbg_kms(dev, "[CRTC:%d:%s] no fb, full mode set\n", set->crtc->base.id, set->crtc->name); mode_changed = true; } else if (set->fb->format != set->crtc->primary->fb->format) { mode_changed = true; } else fb_changed = true; } if (set->x != set->crtc->x || set->y != set->crtc->y) fb_changed = true; if (!drm_mode_equal(set->mode, &set->crtc->mode)) { drm_dbg_kms(dev, "[CRTC:%d:%s] modes are different, full mode set:\n", set->crtc->base.id, set->crtc->name); drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(&set->crtc->mode)); drm_dbg_kms(dev, DRM_MODE_FMT "\n", DRM_MODE_ARG(set->mode)); mode_changed = true; } /* take a reference on all unbound connectors in set, reuse the * already taken reference for bound connectors */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro]->encoder) continue; drm_connector_get(set->connectors[ro]); } /* a) traverse passed in connector list and get encoders for them */ count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) { if (connector_funcs->best_encoder) new_encoder = connector_funcs->best_encoder(connector); else new_encoder = drm_connector_get_single_encoder(connector); /* if we can't get an encoder for a connector we are setting now - then fail */ if (new_encoder == NULL) /* don't break so fail path works correct */ fail = 1; if (connector->dpms != DRM_MODE_DPMS_ON) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] DPMS not on, full mode switch\n", connector->base.id, connector->name); mode_changed = true; } break; } } if (new_encoder != connector->encoder) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] encoder changed, full mode switch\n", connector->base.id, connector->name); mode_changed = true; /* If the encoder is reused for another connector, then * the appropriate crtc will be set later. 
*/ if (connector->encoder) connector->encoder->crtc = NULL; connector->encoder = new_encoder; } } drm_connector_list_iter_end(&conn_iter); if (fail) { ret = -EINVAL; goto fail; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->encoder) continue; if (connector->encoder->crtc == set->crtc) new_crtc = NULL; else new_crtc = connector->encoder->crtc; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) new_crtc = set->crtc; } /* Make sure the new CRTC will work with the encoder */ if (new_crtc && !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { ret = -EINVAL; drm_connector_list_iter_end(&conn_iter); goto fail; } if (new_crtc != connector->encoder->crtc) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] CRTC changed, full mode switch\n", connector->base.id, connector->name); mode_changed = true; connector->encoder->crtc = new_crtc; } if (new_crtc) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", connector->base.id, connector->name, new_crtc->base.id, new_crtc->name); } else { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] to [NOCRTC]\n", connector->base.id, connector->name); } } drm_connector_list_iter_end(&conn_iter); /* mode_set_base is not a required function */ if (fb_changed && !crtc_funcs->mode_set_base) mode_changed = true; if (mode_changed) { if (drm_helper_crtc_in_use(set->crtc)) { drm_dbg_kms(dev, "[CRTC:%d:%s] attempting to set mode from userspace: " DRM_MODE_FMT "\n", set->crtc->base.id, set->crtc->name, DRM_MODE_ARG(set->mode)); set->crtc->primary->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, save_set.fb)) { drm_err(dev, "[CRTC:%d:%s] failed to set mode\n", set->crtc->base.id, set->crtc->name); set->crtc->primary->fb = save_set.fb; ret = -EINVAL; goto fail; } drm_dbg_kms(dev, "[CRTC:%d:%s] Setting connector DPMS state to on\n", set->crtc->base.id, set->crtc->name); for (i = 0; i < set->num_connectors; i++) { drm_dbg_kms(dev, "\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, set->connectors[i]->name); set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); } } __drm_helper_disable_unused_functions(dev); } else if (fb_changed) { set->crtc->x = set->x; set->crtc->y = set->y; set->crtc->primary->fb = set->fb; ret = crtc_funcs->mode_set_base(set->crtc, set->x, set->y, save_set.fb); if (ret != 0) { set->crtc->x = save_set.x; set->crtc->y = save_set.y; set->crtc->primary->fb = save_set.fb; goto fail; } } kfree(save_connector_encoders); kfree(save_encoder_crtcs); return 0; fail: /* Restore all previous data. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { encoder->crtc = save_encoder_crtcs[count++]; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) connector->encoder = save_connector_encoders[count++]; drm_connector_list_iter_end(&conn_iter); /* after fail drop reference on all unbound connectors in set, let * bound connectors keep their reference */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro]->encoder) continue; drm_connector_put(set->connectors[ro]); } /* Try to restore the config */ if (mode_changed && !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, save_set.y, save_set.fb)) drm_err(dev, "failed to restore config after modeset failure\n"); kfree(save_connector_encoders); kfree(save_encoder_crtcs); return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_config); static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder == encoder) if (connector->dpms < dpms) dpms = connector->dpms; drm_connector_list_iter_end(&conn_iter); return dpms; } /* Helper which handles bridge ordering around encoder dpms */ static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) { const struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; if (!encoder_funcs) return; if (encoder_funcs->dpms) encoder_funcs->dpms(encoder, mode); } static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = crtc->dev; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder && connector->encoder->crtc == crtc) if (connector->dpms < dpms) dpms = connector->dpms; drm_connector_list_iter_end(&conn_iter); return dpms; } /** * drm_helper_connector_dpms() - connector dpms helper implementation * @connector: affected connector * @mode: DPMS mode * * The drm_helper_connector_dpms() helper function implements the * &drm_connector_funcs.dpms callback for drivers using the legacy CRTC * helpers. * * This is the main helper function provided by the CRTC helper framework for * implementing the DPMS connector attribute. It computes the new desired DPMS * state for all encoders and CRTCs in the output mesh and calls the * &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks * provided by the driver. * * This function is deprecated. New drivers must implement atomic modeset * support, where DPMS is handled in the DRM core. * * Returns: * Always returns 0. */ int drm_helper_connector_dpms(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = connector->encoder; struct drm_crtc *crtc = encoder ? 
encoder->crtc : NULL; int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; drm_WARN_ON(connector->dev, drm_drv_uses_atomic_modeset(connector->dev)); if (mode == connector->dpms) return 0; old_dpms = connector->dpms; connector->dpms = mode; if (encoder) encoder_dpms = drm_helper_choose_encoder_dpms(encoder); /* from off to on, do crtc then encoder */ if (mode < old_dpms) { if (crtc) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } if (encoder) drm_helper_encoder_dpms(encoder, encoder_dpms); } /* from on to off, do encoder then crtc */ if (mode > old_dpms) { if (encoder) drm_helper_encoder_dpms(encoder, encoder_dpms); if (crtc) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } return 0; } EXPORT_SYMBOL(drm_helper_connector_dpms); /** * drm_helper_resume_force_mode - force-restore mode setting configuration * @dev: drm_device which should be restored * * Drivers which use the mode setting helpers can use this function to * force-restore the mode setting configuration e.g. on resume or when something * else might have trampled over the hw state (like some overzealous old BIOSen * tended to do). * * This helper doesn't provide a error return value since restoring the old * config should never fail due to resource allocation issues since the driver * has successfully set the restored configuration already. Hence this should * boil down to the equivalent of a few dpms on calls, which also don't provide * an error code. * * Drivers where simply restoring an old configuration again might fail (e.g. * due to slight differences in allocating shared resources when the * configuration is restored in a different order than when userspace set it up) * need to use their own restore logic. * * This function is deprecated. New drivers should implement atomic mode- * setting and use the atomic suspend/resume helpers. * * See also: * drm_atomic_helper_suspend(), drm_atomic_helper_resume() */ void drm_helper_resume_force_mode(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; const struct drm_crtc_helper_funcs *crtc_funcs; int encoder_dpms; bool ret; drm_WARN_ON(dev, drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); drm_for_each_crtc(crtc, dev) { if (!crtc->enabled) continue; ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); /* Restoring the old config should never fail! */ if (ret == false) drm_err(dev, "failed to set mode on crtc %p\n", crtc); /* Turn off outputs that were already powered off */ if (drm_helper_choose_crtc_dpms(crtc)) { drm_for_each_encoder(encoder, dev) { if(encoder->crtc != crtc) continue; encoder_dpms = drm_helper_choose_encoder_dpms( encoder); drm_helper_encoder_dpms(encoder, encoder_dpms); } crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } /* disable the unused connectors while restoring the modesetting */ __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_helper_resume_force_mode); /** * drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs * @dev: DRM device whose CRTCs to turn off * * Drivers may want to call this on unload to ensure that all displays are * unlit and the GPU is in a consistent, low power state. Takes modeset locks. 
* * Note: This should only be used by non-atomic legacy drivers. For an atomic * version look at drm_atomic_helper_shutdown(). * * Returns: * Zero on success, error code on failure. */ int drm_helper_force_disable_all(struct drm_device *dev) { struct drm_crtc *crtc; int ret = 0; drm_modeset_lock_all(dev); drm_for_each_crtc(crtc, dev) if (crtc->enabled) { struct drm_mode_set set = { .crtc = crtc, }; ret = drm_mode_set_config_internal(&set); if (ret) goto out; } out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_helper_force_disable_all);
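/*
 * Illustrative sketch (not part of the file above): roughly how a legacy,
 * non-atomic KMS driver might wire the CRTC helpers into its function tables,
 * so that drm_crtc_helper_set_config() and drm_helper_connector_dpms() drive
 * the callbacks documented above. All foo_* identifiers are hypothetical;
 * hardware programming and error handling are elided.
 */
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static void foo_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* program the hardware power state for @crtc (hypothetical) */
}

static void foo_crtc_prepare(struct drm_crtc *crtc)
{
	/* the helpers disable the pipe before a full mode set */
	foo_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void foo_crtc_commit(struct drm_crtc *crtc)
{
	/* ...and re-enable it once the new mode is programmed */
	foo_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}

static int foo_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode, int x, int y,
			     struct drm_framebuffer *old_fb)
{
	/* program PLLs, timings and the scanout base (hypothetical) */
	return 0;
}

/* Called by drm_crtc_helper_set_mode()/set_config() in the order documented above. */
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,
	.prepare	= foo_crtc_prepare,
	.mode_set	= foo_crtc_mode_set,
	.commit		= foo_crtc_commit,
};

/* The legacy helpers back the core vfuncs directly. */
static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config	= drm_crtc_helper_set_config,
	.destroy	= drm_crtc_cleanup,
};

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms		= drm_helper_connector_dpms,
	.destroy	= drm_connector_cleanup,
};

static int foo_crtc_setup(struct drm_device *dev, struct drm_crtc *crtc)
{
	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
	return drm_crtc_init(dev, crtc, &foo_crtc_funcs);
}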
// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In the batching mode, entries should also be sorted by the code
	 * inside the already sorted list of entries, enabling a bsearch in
	 * the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code	= jeb->code - delta;
	jea->target	= jeb->target - delta;
	jea->key	= jeb->key - delta;

	jeb->code	= tmp.code + delta;
	jeb->target	= tmp.target + delta;
	jeb->key	= tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case it's ok
 * to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 *  static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done.
Unlike refcount_t the ref counter * is not saturated, but will fail to increment on overflow. */ bool static_key_fast_inc_not_disabled(struct static_key *key) { int v; STATIC_KEY_CHECK_USE(key); /* * Negative key->enabled has a special meaning: it sends * static_key_slow_inc/dec() down the slow path, and it is non-zero * so it counts as "enabled" in jump_label_update(). * * The INT_MAX overflow condition is either used by the networking * code to reset or detected in the slow path of * static_key_slow_inc_cpuslocked(). */ v = atomic_read(&key->enabled); do { if (v <= 0 || v == INT_MAX) return false; } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1))); return true; } EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled); bool static_key_slow_inc_cpuslocked(struct static_key *key) { lockdep_assert_cpus_held(); /* * Careful if we get concurrent static_key_slow_inc/dec() calls; * later calls must wait for the first one to _finish_ the * jump_label_update() process. At the same time, however, * the jump_label_update() call below wants to see * static_key_enabled(&key) for jumps to be updated properly. */ if (static_key_fast_inc_not_disabled(key)) return true; guard(mutex)(&jump_label_mutex); /* Try to mark it as 'enabling in progress. */ if (!atomic_cmpxchg(&key->enabled, 0, -1)) { jump_label_update(key); /* * Ensure that when static_key_fast_inc_not_disabled() or * static_key_dec_not_one() observe the positive value, * they must also observe all the text changes. */ atomic_set_release(&key->enabled, 1); } else { /* * While holding the mutex this should never observe * anything else than a value >= 1 and succeed */ if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) return false; } return true; } bool static_key_slow_inc(struct static_key *key) { bool ret; cpus_read_lock(); ret = static_key_slow_inc_cpuslocked(key); cpus_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(static_key_slow_inc); void static_key_enable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) > 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); return; } jump_label_lock(); if (atomic_read(&key->enabled) == 0) { atomic_set(&key->enabled, -1); jump_label_update(key); /* * See static_key_slow_inc(). */ atomic_set_release(&key->enabled, 1); } jump_label_unlock(); } EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked); void static_key_enable(struct static_key *key) { cpus_read_lock(); static_key_enable_cpuslocked(key); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_enable); void static_key_disable_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); lockdep_assert_cpus_held(); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); return; } jump_label_lock(); if (atomic_cmpxchg(&key->enabled, 1, 0) == 1) jump_label_update(key); jump_label_unlock(); } EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked); void static_key_disable(struct static_key *key) { cpus_read_lock(); static_key_disable_cpuslocked(key); cpus_read_unlock(); } EXPORT_SYMBOL_GPL(static_key_disable); static bool static_key_dec_not_one(struct static_key *key) { int v; /* * Go into the slow path if key::enabled is less than or equal than * one. One is valid to shut down the key, anything less than one * is an imbalance, which is handled at the call site. * * That includes the special case of '-1' which is set in * static_key_slow_inc_cpuslocked(), but that's harmless as it is * fully serialized in the slow path below. 
By the time this task * acquires the jump label lock the value is back to one and the * retry under the lock must succeed. */ v = atomic_read(&key->enabled); do { /* * Warn about the '-1' case though; since that means a * decrement is concurrent with a first (0->1) increment. IOW * people are trying to disable something that wasn't yet fully * enabled. This suggests an ordering problem on the user side. */ WARN_ON_ONCE(v < 0); /* * Warn about underflow, and lie about success in an attempt to * not make things worse. */ if (WARN_ON_ONCE(v == 0)) return true; if (v <= 1) return false; } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1))); return true; } static void __static_key_slow_dec_cpuslocked(struct static_key *key) { lockdep_assert_cpus_held(); int val; if (static_key_dec_not_one(key)) return; guard(mutex)(&jump_label_mutex); val = atomic_read(&key->enabled); /* * It should be impossible to observe -1 with jump_label_mutex held, * see static_key_slow_inc_cpuslocked(). */ if (WARN_ON_ONCE(val == -1)) return; /* * Cannot already be 0, something went sideways. */ if (WARN_ON_ONCE(val == 0)) return; if (atomic_dec_and_test(&key->enabled)) jump_label_update(key); } static void __static_key_slow_dec(struct static_key *key) { cpus_read_lock(); __static_key_slow_dec_cpuslocked(key); cpus_read_unlock(); } void jump_label_update_timeout(struct work_struct *work) { struct static_key_deferred *key = container_of(work, struct static_key_deferred, work.work); __static_key_slow_dec(&key->key); } EXPORT_SYMBOL_GPL(jump_label_update_timeout); void static_key_slow_dec(struct static_key *key) { STATIC_KEY_CHECK_USE(key); __static_key_slow_dec(key); } EXPORT_SYMBOL_GPL(static_key_slow_dec); void static_key_slow_dec_cpuslocked(struct static_key *key) { STATIC_KEY_CHECK_USE(key); __static_key_slow_dec_cpuslocked(key); } void __static_key_slow_dec_deferred(struct static_key *key, struct delayed_work *work, unsigned long timeout) { STATIC_KEY_CHECK_USE(key); if (static_key_dec_not_one(key)) return; schedule_delayed_work(work, timeout); } EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred); void __static_key_deferred_flush(void *key, struct delayed_work *work) { STATIC_KEY_CHECK_USE(key); flush_delayed_work(work); } EXPORT_SYMBOL_GPL(__static_key_deferred_flush); void jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl) { STATIC_KEY_CHECK_USE(key); key->timeout = rl; INIT_DELAYED_WORK(&key->work, jump_label_update_timeout); } EXPORT_SYMBOL_GPL(jump_label_rate_limit); static int addr_conflict(struct jump_entry *entry, void *start, void *end) { if (jump_entry_code(entry) <= (unsigned long)end && jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start) return 1; return 0; } static int __jump_label_text_reserved(struct jump_entry *iter_start, struct jump_entry *iter_stop, void *start, void *end, bool init) { struct jump_entry *iter; iter = iter_start; while (iter < iter_stop) { if (init || !jump_entry_is_init(iter)) { if (addr_conflict(iter, start, end)) return 1; } iter++; } return 0; } #ifndef arch_jump_label_transform_static static void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type) { /* nothing to do on most architectures */ } #endif static inline struct jump_entry *static_key_entries(struct static_key *key) { WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED); return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK); } static inline bool static_key_type(struct static_key *key) { return key->type & JUMP_TYPE_TRUE; } static inline bool 
static_key_linked(struct static_key *key) { return key->type & JUMP_TYPE_LINKED; } static inline void static_key_clear_linked(struct static_key *key) { key->type &= ~JUMP_TYPE_LINKED; } static inline void static_key_set_linked(struct static_key *key) { key->type |= JUMP_TYPE_LINKED; } /*** * A 'struct static_key' uses a union such that it either points directly * to a table of 'struct jump_entry' or to a linked list of modules which in * turn point to 'struct jump_entry' tables. * * The two lower bits of the pointer are used to keep track of which pointer * type is in use and to store the initial branch direction, we use an access * function which preserves these bits. */ static void static_key_set_entries(struct static_key *key, struct jump_entry *entries) { unsigned long type; WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK); type = key->type & JUMP_TYPE_MASK; key->entries = entries; key->type |= type; } static enum jump_label_type jump_label_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool enabled = static_key_enabled(key); bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return enabled ^ branch; } static bool jump_label_can_update(struct jump_entry *entry, bool init) { /* * Cannot update code that was in an init text area. */ if (!init && jump_entry_is_init(entry)) return false; if (!kernel_text_address(jump_entry_code(entry))) { /* * This skips patching built-in __exit, which * is part of init_section_contains() but is * not part of kernel_text_address(). * * Skipping built-in __exit is fine since it * will never be executed. */ WARN_ONCE(!jump_entry_is_init(entry), "can't patch jump_label at %pS", (void *)jump_entry_code(entry)); return false; } return true; } #ifndef HAVE_JUMP_LABEL_BATCH static void __jump_label_update(struct static_key *key, struct jump_entry *entry, struct jump_entry *stop, bool init) { for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { if (jump_label_can_update(entry, init)) arch_jump_label_transform(entry, jump_label_type(entry)); } } #else static void __jump_label_update(struct static_key *key, struct jump_entry *entry, struct jump_entry *stop, bool init) { for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) { if (!jump_label_can_update(entry, init)) continue; if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) { /* * Queue is full: Apply the current queue and try again. */ arch_jump_label_transform_apply(); BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry))); } } arch_jump_label_transform_apply(); } #endif void __init jump_label_init(void) { struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; struct static_key *key = NULL; struct jump_entry *iter; /* * Since we are initializing the static_key.enabled field with * with the 'raw' int values (to avoid pulling in atomic.h) in * jump_label.h, let's make sure that is safe. There are only two * cases to check since we initialize to 0 or 1. 
*/ BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0); BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1); if (static_key_initialized) return; cpus_read_lock(); jump_label_lock(); jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; bool in_init; /* rewrite NOPs */ if (jump_label_type(iter) == JUMP_LABEL_NOP) arch_jump_label_transform_static(iter, JUMP_LABEL_NOP); in_init = init_section_contains((void *)jump_entry_code(iter), 1); jump_entry_set_init(iter, in_init); iterk = jump_entry_key(iter); if (iterk == key) continue; key = iterk; static_key_set_entries(key, iter); } static_key_initialized = true; jump_label_unlock(); cpus_read_unlock(); } static inline bool static_key_sealed(struct static_key *key) { return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK); } static inline void static_key_seal(struct static_key *key) { unsigned long type = key->type & JUMP_TYPE_TRUE; key->type = JUMP_TYPE_LINKED | type; } void jump_label_init_ro(void) { struct jump_entry *iter_start = __start___jump_table; struct jump_entry *iter_stop = __stop___jump_table; struct jump_entry *iter; if (WARN_ON_ONCE(!static_key_initialized)) return; cpus_read_lock(); jump_label_lock(); for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk = jump_entry_key(iter); if (!is_kernel_ro_after_init((unsigned long)iterk)) continue; if (static_key_sealed(iterk)) continue; static_key_seal(iterk); } jump_label_unlock(); cpus_read_unlock(); } #ifdef CONFIG_MODULES enum jump_label_type jump_label_init_type(struct jump_entry *entry) { struct static_key *key = jump_entry_key(entry); bool type = static_key_type(key); bool branch = jump_entry_is_branch(entry); /* See the comment in linux/jump_label.h */ return type ^ branch; } struct static_key_mod { struct static_key_mod *next; struct jump_entry *entries; struct module *mod; }; static inline struct static_key_mod *static_key_mod(struct static_key *key) { WARN_ON_ONCE(!static_key_linked(key)); return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK); } /*** * key->type and key->next are the same via union. * This sets key->next and preserves the type bits. * * See additional comments above static_key_set_entries(). 
*/ static void static_key_set_mod(struct static_key *key, struct static_key_mod *mod) { unsigned long type; WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK); type = key->type & JUMP_TYPE_MASK; key->next = mod; key->type |= type; } static int __jump_label_mod_text_reserved(void *start, void *end) { struct module *mod; int ret; preempt_disable(); mod = __module_text_address((unsigned long)start); WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); if (!try_module_get(mod)) mod = NULL; preempt_enable(); if (!mod) return 0; ret = __jump_label_text_reserved(mod->jump_entries, mod->jump_entries + mod->num_jump_entries, start, end, mod->state == MODULE_STATE_COMING); module_put(mod); return ret; } static void __jump_label_mod_update(struct static_key *key) { struct static_key_mod *mod; for (mod = static_key_mod(key); mod; mod = mod->next) { struct jump_entry *stop; struct module *m; /* * NULL if the static_key is defined in a module * that does not use it */ if (!mod->entries) continue; m = mod->mod; if (!m) stop = __stop___jump_table; else stop = m->jump_entries + m->num_jump_entries; __jump_label_update(key, mod->entries, stop, m && m->state == MODULE_STATE_COMING); } } static int jump_label_add_module(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; struct static_key *key = NULL; struct static_key_mod *jlm, *jlm2; /* if the module doesn't have jump label entries, just return */ if (iter_start == iter_stop) return 0; jump_label_sort_entries(iter_start, iter_stop); for (iter = iter_start; iter < iter_stop; iter++) { struct static_key *iterk; bool in_init; in_init = within_module_init(jump_entry_code(iter), mod); jump_entry_set_init(iter, in_init); iterk = jump_entry_key(iter); if (iterk == key) continue; key = iterk; if (within_module((unsigned long)key, mod)) { static_key_set_entries(key, iter); continue; } /* * If the key was sealed at init, then there's no need to keep a * reference to its module entries - just patch them now and be * done with it. */ if (static_key_sealed(key)) goto do_poke; jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; if (!static_key_linked(key)) { jlm2 = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm2) { kfree(jlm); return -ENOMEM; } preempt_disable(); jlm2->mod = __module_address((unsigned long)key); preempt_enable(); jlm2->entries = static_key_entries(key); jlm2->next = NULL; static_key_set_mod(key, jlm2); static_key_set_linked(key); } jlm->mod = mod; jlm->entries = iter; jlm->next = static_key_mod(key); static_key_set_mod(key, jlm); static_key_set_linked(key); /* Only update if we've changed from our initial state */ do_poke: if (jump_label_type(iter) != jump_label_init_type(iter)) __jump_label_update(key, iter, iter_stop, true); } return 0; } static void jump_label_del_module(struct module *mod) { struct jump_entry *iter_start = mod->jump_entries; struct jump_entry *iter_stop = iter_start + mod->num_jump_entries; struct jump_entry *iter; struct static_key *key = NULL; struct static_key_mod *jlm, **prev; for (iter = iter_start; iter < iter_stop; iter++) { if (jump_entry_key(iter) == key) continue; key = jump_entry_key(iter); if (within_module((unsigned long)key, mod)) continue; /* No @jlm allocated because key was sealed at init. 
*/ if (static_key_sealed(key)) continue; /* No memory during module load */ if (WARN_ON(!static_key_linked(key))) continue; prev = &key->next; jlm = static_key_mod(key); while (jlm && jlm->mod != mod) { prev = &jlm->next; jlm = jlm->next; } /* No memory during module load */ if (WARN_ON(!jlm)) continue; if (prev == &key->next) static_key_set_mod(key, jlm->next); else *prev = jlm->next; kfree(jlm); jlm = static_key_mod(key); /* if only one entry is left, fold it back into the static_key */ if (jlm->next == NULL) { static_key_set_entries(key, jlm->entries); static_key_clear_linked(key); kfree(jlm); } } } static int jump_label_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; int ret = 0; cpus_read_lock(); jump_label_lock(); switch (val) { case MODULE_STATE_COMING: ret = jump_label_add_module(mod); if (ret) { WARN(1, "Failed to allocate memory: jump_label may not work properly.\n"); jump_label_del_module(mod); } break; case MODULE_STATE_GOING: jump_label_del_module(mod); break; } jump_label_unlock(); cpus_read_unlock(); return notifier_from_errno(ret); } static struct notifier_block jump_label_module_nb = { .notifier_call = jump_label_module_notify, .priority = 1, /* higher than tracepoints */ }; static __init int jump_label_init_module(void) { return register_module_notifier(&jump_label_module_nb); } early_initcall(jump_label_init_module); #endif /* CONFIG_MODULES */ /*** * jump_label_text_reserved - check if addr range is reserved * @start: start text addr * @end: end text addr * * checks if the text addr located between @start and @end * overlaps with any of the jump label patch addresses. Code * that wants to modify kernel text should first verify that * it does not overlap with any of the jump label addresses. * Caller must hold jump_label_mutex.
* * returns 1 if there is an overlap, 0 otherwise */ int jump_label_text_reserved(void *start, void *end) { bool init = system_state < SYSTEM_RUNNING; int ret = __jump_label_text_reserved(__start___jump_table, __stop___jump_table, start, end, init); if (ret) return ret; #ifdef CONFIG_MODULES ret = __jump_label_mod_text_reserved(start, end); #endif return ret; } static void jump_label_update(struct static_key *key) { struct jump_entry *stop = __stop___jump_table; bool init = system_state < SYSTEM_RUNNING; struct jump_entry *entry; #ifdef CONFIG_MODULES struct module *mod; if (static_key_linked(key)) { __jump_label_mod_update(key); return; } preempt_disable(); mod = __module_address((unsigned long)key); if (mod) { stop = mod->jump_entries + mod->num_jump_entries; init = mod->state == MODULE_STATE_COMING; } preempt_enable(); #endif entry = static_key_entries(key); /* if there are no users, entry can be NULL */ if (entry) __jump_label_update(key, entry, stop, init); } #ifdef CONFIG_STATIC_KEYS_SELFTEST static DEFINE_STATIC_KEY_TRUE(sk_true); static DEFINE_STATIC_KEY_FALSE(sk_false); static __init int jump_label_test(void) { int i; for (i = 0; i < 2; i++) { WARN_ON(static_key_enabled(&sk_true.key) != true); WARN_ON(static_key_enabled(&sk_false.key) != false); WARN_ON(!static_branch_likely(&sk_true)); WARN_ON(!static_branch_unlikely(&sk_true)); WARN_ON(static_branch_likely(&sk_false)); WARN_ON(static_branch_unlikely(&sk_false)); static_branch_disable(&sk_true); static_branch_enable(&sk_false); WARN_ON(static_key_enabled(&sk_true.key) == true); WARN_ON(static_key_enabled(&sk_false.key) == false); WARN_ON(static_branch_likely(&sk_true)); WARN_ON(static_branch_unlikely(&sk_true)); WARN_ON(!static_branch_likely(&sk_false)); WARN_ON(!static_branch_unlikely(&sk_false)); static_branch_enable(&sk_true); static_branch_disable(&sk_false); } return 0; } early_initcall(jump_label_test); #endif /* STATIC_KEYS_SELFTEST */
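The selftest above exercises the full static branch API (DEFINE_STATIC_KEY_*, static_branch_likely/unlikely, static_branch_enable/disable). For orientation, here is a minimal, hedged sketch of how ordinary kernel code consumes that API; my_feature_key, my_fast_path() and my_feature_enable() are illustrative names, not identifiers from jump_label.c.

/* Hedged sketch: a hypothetical user of the static branch API, mirroring the
 * pattern exercised by the selftest above. The key defaults to false, so the
 * branch body sits behind a NOP until the key is enabled at runtime, at which
 * point the jump label machinery patches the NOP into a jump.
 */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_fast_path(void)
{
	if (static_branch_unlikely(&my_feature_key)) {
		/* rarely-enabled debug/slow path, compiled out of line */
	}
}

void my_feature_enable(void)
{
	static_branch_enable(&my_feature_key);	/* triggers text patching */
}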
// SPDX-License-Identifier: GPL-2.0 /* MPTCP socket monitoring support * * Copyright (c) 2020 Red Hat * * Author: Paolo Abeni <pabeni@redhat.com> */ #include <linux/kernel.h> #include <linux/net.h> #include <linux/inet_diag.h> #include <net/netlink.h> #include "protocol.h" static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *req, struct nlattr *bc, bool net_admin) { if (!inet_diag_bc_sk(bc, sk)) return 0; return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI, net_admin); } static int mptcp_diag_dump_one(struct netlink_callback *cb, const struct inet_diag_req_v2 *req) { struct sk_buff *in_skb = cb->skb; struct mptcp_sock *msk = NULL; struct sk_buff *rep; int err = -ENOENT; struct net *net; struct sock *sk; net = sock_net(in_skb->sk); msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]); if (!msk) goto out_nosk; err = -ENOMEM; sk = (struct sock *)msk; rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + inet_diag_msg_attrs_size() + nla_total_size(sizeof(struct mptcp_info)) + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, GFP_KERNEL); if (!rep) goto out; err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0, netlink_net_capable(in_skb, CAP_NET_ADMIN)); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(rep); goto out; } err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid); out: sock_put(sk); out_nosk: return err; } struct mptcp_diag_ctx { long s_slot; long s_num; unsigned int l_slot; unsigned int l_num; }; static void mptcp_diag_dump_listeners(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r, bool net_admin) { struct inet_diag_dump_data *cb_data = cb->data; struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx; struct nlattr *bc = cb_data->inet_diag_nla_bc; struct net *net = sock_net(skb->sk); struct inet_hashinfo *hinfo; int i; hinfo = net->ipv4.tcp_death_row.hashinfo; for (i = diag_ctx->l_slot; i <= hinfo->lhash2_mask; i++) { struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; struct sock *sk; int num = 0; ilb = &hinfo->lhash2[i]; rcu_read_lock(); spin_lock(&ilb->lock); sk_nulls_for_each(sk, node, &ilb->nulls_head) { const struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk); struct inet_sock *inet = inet_sk(sk); int ret; if (num < diag_ctx->l_num) goto next_listen; if (!ctx || strcmp(inet_csk(sk)->icsk_ulp_ops->name, "mptcp")) goto next_listen; sk = ctx->conn; if (!sk || !net_eq(sock_net(sk), net)) goto next_listen; if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
goto next_listen; if (r->id.idiag_sport != inet->inet_sport && r->id.idiag_sport) goto next_listen; if (!refcount_inc_not_zero(&sk->sk_refcnt)) goto next_listen; ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin); sock_put(sk); if (ret < 0) { spin_unlock(&ilb->lock); rcu_read_unlock(); diag_ctx->l_slot = i; diag_ctx->l_num = num; return; } diag_ctx->l_num = num + 1; num = 0; next_listen: ++num; } spin_unlock(&ilb->lock); rcu_read_unlock(); cond_resched(); diag_ctx->l_num = 0; } diag_ctx->l_num = 0; diag_ctx->l_slot = i; } static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); struct mptcp_diag_ctx *diag_ctx = (void *)cb->ctx; struct net *net = sock_net(skb->sk); struct inet_diag_dump_data *cb_data; struct mptcp_sock *msk; struct nlattr *bc; BUILD_BUG_ON(sizeof(cb->ctx) < sizeof(*diag_ctx)); cb_data = cb->data; bc = cb_data->inet_diag_nla_bc; while ((msk = mptcp_token_iter_next(net, &diag_ctx->s_slot, &diag_ctx->s_num)) != NULL) { struct inet_sock *inet = (struct inet_sock *)msk; struct sock *sk = (struct sock *)msk; int ret = 0; if (!(r->idiag_states & (1 << sk->sk_state))) goto next; if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) goto next; if (r->id.idiag_sport != inet->inet_sport && r->id.idiag_sport) goto next; if (r->id.idiag_dport != inet->inet_dport && r->id.idiag_dport) goto next; ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin); next: sock_put(sk); if (ret < 0) { /* will retry on the same position */ diag_ctx->s_num--; break; } cond_resched(); } if ((r->idiag_states & TCPF_LISTEN) && r->id.idiag_dport == 0) mptcp_diag_dump_listeners(skb, cb, r, net_admin); } static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, void *_info) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_info *info = _info; r->idiag_rqueue = sk_rmem_alloc_get(sk); r->idiag_wqueue = sk_wmem_alloc_get(sk); if (inet_sk_state_load(sk) == TCP_LISTEN) { struct sock *lsk = READ_ONCE(msk->first); if (lsk) { /* override with settings from tcp listener, * so Send-Q will show accept queue. */ r->idiag_rqueue = READ_ONCE(lsk->sk_ack_backlog); r->idiag_wqueue = READ_ONCE(lsk->sk_max_ack_backlog); } } if (!info) return; mptcp_diag_fill_info(msk, info); } static const struct inet_diag_handler mptcp_diag_handler = { .owner = THIS_MODULE, .dump = mptcp_diag_dump, .dump_one = mptcp_diag_dump_one, .idiag_get_info = mptcp_diag_get_info, .idiag_type = IPPROTO_MPTCP, .idiag_info_size = sizeof(struct mptcp_info), }; static int __init mptcp_diag_init(void) { return inet_diag_register(&mptcp_diag_handler); } static void __exit mptcp_diag_exit(void) { inet_diag_unregister(&mptcp_diag_handler); } module_init(mptcp_diag_init); module_exit(mptcp_diag_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MPTCP socket monitoring via SOCK_DIAG"); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-262 /* AF_INET - IPPROTO_MPTCP */);
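The handler registered above answers NETLINK_SOCK_DIAG requests for IPPROTO_MPTCP. As a hedged illustration of the other side of that interface, the userspace sketch below issues a dump request with inet_diag_req_v2; request_mptcp_dump() is a hypothetical helper, reply parsing is omitted, and IPPROTO_MPTCP (262) is assumed to be provided by the system headers.

/* Hedged userspace sketch: issue a NETLINK_SOCK_DIAG dump for MPTCP sockets,
 * which is what mptcp_diag_dump() above services in the kernel.
 */
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int request_mptcp_dump(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_INET,
			.sdiag_protocol = IPPROTO_MPTCP,
			.idiag_states = -1,	/* all states, incl. listeners */
		},
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

	if (fd < 0)
		return -1;
	if (send(fd, &msg, sizeof(msg), 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller recv()s inet_diag_msg replies, then closes */
}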
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM percpu #if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_PERCPU_H #include <linux/tracepoint.h> #include <trace/events/mmflags.h> TRACE_EVENT(percpu_alloc_percpu, TP_PROTO(unsigned long call_site, bool reserved, bool is_atomic, size_t size, size_t align, void *base_addr, int off, void __percpu *ptr, size_t bytes_alloc, gfp_t gfp_flags), TP_ARGS(call_site, reserved, is_atomic, size, align, base_addr, off, ptr, bytes_alloc, gfp_flags), TP_STRUCT__entry( __field( unsigned long, call_site ) __field( bool, reserved ) __field( bool, is_atomic ) __field( size_t, size ) __field( size_t, align ) __field( void *, base_addr ) __field( int, off ) __field( void __percpu *, ptr ) __field( size_t, bytes_alloc ) __field( unsigned long, gfp_flags ) ), TP_fast_assign( __entry->call_site = call_site; __entry->reserved = reserved; __entry->is_atomic = is_atomic; __entry->size = size; __entry->align = align; __entry->base_addr = base_addr; __entry->off = off; __entry->ptr = ptr; __entry->bytes_alloc = bytes_alloc; __entry->gfp_flags = (__force unsigned long)gfp_flags; ), TP_printk("call_site=%pS reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p bytes_alloc=%zu gfp_flags=%s", (void *)__entry->call_site, __entry->reserved, __entry->is_atomic, __entry->size, __entry->align, __entry->base_addr, __entry->off, __entry->ptr, __entry->bytes_alloc, show_gfp_flags(__entry->gfp_flags)) ); TRACE_EVENT(percpu_free_percpu, TP_PROTO(void *base_addr, int off, void __percpu *ptr), TP_ARGS(base_addr, off, ptr), TP_STRUCT__entry( __field( void *, base_addr ) __field( int, off ) __field( void __percpu *, ptr ) ), TP_fast_assign( __entry->base_addr = base_addr; __entry->off = off; __entry->ptr = ptr; ), TP_printk("base_addr=%p off=%d ptr=%p", __entry->base_addr, __entry->off, __entry->ptr) ); TRACE_EVENT(percpu_alloc_percpu_fail, TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align), TP_ARGS(reserved, is_atomic, size, align), TP_STRUCT__entry( __field( bool, reserved ) __field( bool, is_atomic ) __field( size_t, size ) __field( size_t, align ) ), TP_fast_assign( __entry->reserved = reserved; __entry->is_atomic = is_atomic; __entry->size = size; __entry->align = align; ), TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu", __entry->reserved, __entry->is_atomic, __entry->size, __entry->align) ); TRACE_EVENT(percpu_create_chunk, TP_PROTO(void *base_addr), TP_ARGS(base_addr), TP_STRUCT__entry( __field( void *, base_addr ) ), TP_fast_assign( __entry->base_addr = base_addr; ), TP_printk("base_addr=%p", __entry->base_addr) ); TRACE_EVENT(percpu_destroy_chunk, TP_PROTO(void *base_addr), TP_ARGS(base_addr), TP_STRUCT__entry( __field( void *, base_addr ) ), TP_fast_assign( __entry->base_addr = base_addr; ), TP_printk("base_addr=%p", __entry->base_addr) ); #endif /* _TRACE_PERCPU_H */ #include <trace/define_trace.h>
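The TRACE_EVENT() definitions above also generate register_trace_<name>()/unregister_trace_<name>() helpers. The sketch below shows how a probe with the matching TP_PROTO signature would be attached; whether a module can actually link against this particular tracepoint depends on it being exported, which this header alone does not guarantee, so treat this as an illustration of the mechanism. my_percpu_alloc_probe() and the attach/detach wrappers are illustrative names.

/* Hedged sketch: attaching a probe to the percpu_alloc_percpu tracepoint
 * declared above. The probe takes a leading void *data argument followed by
 * the TP_PROTO arguments.
 */
#include <linux/printk.h>
#include <linux/tracepoint.h>
#include <trace/events/percpu.h>

static void my_percpu_alloc_probe(void *data, unsigned long call_site,
				  bool reserved, bool is_atomic, size_t size,
				  size_t align, void *base_addr, int off,
				  void __percpu *ptr, size_t bytes_alloc,
				  gfp_t gfp_flags)
{
	/* runs every time trace_percpu_alloc_percpu() fires */
	pr_debug("percpu alloc: %zu bytes from %pS\n", size, (void *)call_site);
}

static int my_probe_attach(void)
{
	return register_trace_percpu_alloc_percpu(my_percpu_alloc_probe, NULL);
}

static void my_probe_detach(void)
{
	unregister_trace_percpu_alloc_percpu(my_percpu_alloc_probe, NULL);
}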
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_shared.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_defer.h" #include "xfs_btree.h" #include "xfs_rmap.h" #include "xfs_alloc_btree.h" #include "xfs_alloc.h" #include "xfs_extent_busy.h" #include "xfs_errortag.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_trans.h" #include "xfs_buf_item.h" #include "xfs_log.h" #include "xfs_ag.h" #include "xfs_ag_resv.h" #include "xfs_bmap.h" #include "xfs_health.h" #include "xfs_extfree_item.h" struct kmem_cache *xfs_extfree_item_cache; struct workqueue_struct *xfs_alloc_wq; #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) #define XFSA_FIXUP_BNO_OK 1 #define XFSA_FIXUP_CNT_OK 2 /* * Size of the AGFL. For CRC-enabled filesystems we steal a couple of slots in * the beginning of the block for a proper header with the location information * and CRC.
*/ unsigned int xfs_agfl_size( struct xfs_mount *mp) { unsigned int size = mp->m_sb.sb_sectsize; if (xfs_has_crc(mp)) size -= sizeof(struct xfs_agfl); return size / sizeof(xfs_agblock_t); } unsigned int xfs_refc_block( struct xfs_mount *mp) { if (xfs_has_rmapbt(mp)) return XFS_RMAP_BLOCK(mp) + 1; if (xfs_has_finobt(mp)) return XFS_FIBT_BLOCK(mp) + 1; return XFS_IBT_BLOCK(mp) + 1; } xfs_extlen_t xfs_prealloc_blocks( struct xfs_mount *mp) { if (xfs_has_reflink(mp)) return xfs_refc_block(mp) + 1; if (xfs_has_rmapbt(mp)) return XFS_RMAP_BLOCK(mp) + 1; if (xfs_has_finobt(mp)) return XFS_FIBT_BLOCK(mp) + 1; return XFS_IBT_BLOCK(mp) + 1; } /* * The number of blocks per AG that we withhold from xfs_dec_fdblocks to * guarantee that we can refill the AGFL prior to allocating space in a nearly * full AG. Although the space described by the free space btrees, the * blocks used by the freesp btrees themselves, and the blocks owned by the * AGFL are counted in the ondisk fdblocks, it's a mistake to let the ondisk * free space in the AG drop so low that the free space btrees cannot refill an * empty AGFL up to the minimum level. Rather than grind through empty AGs * until the fs goes down, we subtract this many AG blocks from the incore * fdblocks to ensure user allocation does not overcommit the space the * filesystem needs for the AGFLs. The rmap btree uses a per-AG reservation to * withhold space from xfs_dec_fdblocks, so we do not account for that here. */ #define XFS_ALLOCBT_AGFL_RESERVE 4 /* * Compute the number of blocks that we set aside to guarantee the ability to * refill the AGFL and handle a full bmap btree split. * * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of * AGF buffer (PV 947395), we place constraints on the relationship among * actual allocations for data blocks, freelist blocks, and potential file data * bmap btree blocks. However, these restrictions may result in no actual space * allocated for a delayed extent, for example, a data block in a certain AG is * allocated but there is no additional block for the additional bmap btree * block due to a split of the bmap btree of the file. The result of this may * lead to an infinite loop when the file gets flushed to disk and all delayed * extents need to be actually allocated. To get around this, we explicitly set * aside a few blocks which will not be reserved in delayed allocation. * * For each AG, we need to reserve enough blocks to replenish a totally empty * AGFL and 4 more to handle a potential split of the file's bmap btree. */ unsigned int xfs_alloc_set_aside( struct xfs_mount *mp) { return mp->m_sb.sb_agcount * (XFS_ALLOCBT_AGFL_RESERVE + 4); } /* * When deciding how much space to allocate out of an AG, we limit the * allocation maximum size to the size the AG. However, we cannot use all the * blocks in the AG - some are permanently used by metadata. These * blocks are generally: * - the AG superblock, AGF, AGI and AGFL * - the AGF (bno and cnt) and AGI btree root blocks, and optionally * the AGI free inode and rmap btree root blocks. * - blocks on the AGFL according to xfs_alloc_set_aside() limits * - the rmapbt root block * * The AG headers are sector sized, so the amount of space they take up is * dependent on filesystem geometry. The others are all single blocks. 
*/ unsigned int xfs_alloc_ag_max_usable( struct xfs_mount *mp) { unsigned int blocks; blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */ blocks += XFS_ALLOCBT_AGFL_RESERVE; blocks += 3; /* AGF, AGI btree root blocks */ if (xfs_has_finobt(mp)) blocks++; /* finobt root block */ if (xfs_has_rmapbt(mp)) blocks++; /* rmap root block */ if (xfs_has_reflink(mp)) blocks++; /* refcount root block */ return mp->m_sb.sb_agblocks - blocks; } static int xfs_alloc_lookup( struct xfs_btree_cur *cur, xfs_lookup_t dir, xfs_agblock_t bno, xfs_extlen_t len, int *stat) { int error; cur->bc_rec.a.ar_startblock = bno; cur->bc_rec.a.ar_blockcount = len; error = xfs_btree_lookup(cur, dir, stat); if (*stat == 1) cur->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE; else cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE; return error; } /* * Lookup the record equal to [bno, len] in the btree given by cur. */ static inline int /* error */ xfs_alloc_lookup_eq( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, bno, len, stat); } /* * Lookup the first record greater than or equal to [bno, len] * in the btree given by cur. */ int /* error */ xfs_alloc_lookup_ge( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, bno, len, stat); } /* * Lookup the first record less than or equal to [bno, len] * in the btree given by cur. */ int /* error */ xfs_alloc_lookup_le( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len, /* length of extent */ int *stat) /* success/failure */ { return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, bno, len, stat); } static inline bool xfs_alloc_cur_active( struct xfs_btree_cur *cur) { return cur && (cur->bc_flags & XFS_BTREE_ALLOCBT_ACTIVE); } /* * Update the record referred to by cur to the value given * by [bno, len]. * This either works (return 0) or gets an EFSCORRUPTED error. */ STATIC int /* error */ xfs_alloc_update( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ xfs_extlen_t len) /* length of extent */ { union xfs_btree_rec rec; rec.alloc.ar_startblock = cpu_to_be32(bno); rec.alloc.ar_blockcount = cpu_to_be32(len); return xfs_btree_update(cur, &rec); } /* Convert the ondisk btree record to its incore representation. */ void xfs_alloc_btrec_to_irec( const union xfs_btree_rec *rec, struct xfs_alloc_rec_incore *irec) { irec->ar_startblock = be32_to_cpu(rec->alloc.ar_startblock); irec->ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount); } /* Simple checks for free space records. 
*/ xfs_failaddr_t xfs_alloc_check_irec( struct xfs_perag *pag, const struct xfs_alloc_rec_incore *irec) { if (irec->ar_blockcount == 0) return __this_address; /* check for valid extent range, including overflow */ if (!xfs_verify_agbext(pag, irec->ar_startblock, irec->ar_blockcount)) return __this_address; return NULL; } static inline int xfs_alloc_complain_bad_rec( struct xfs_btree_cur *cur, xfs_failaddr_t fa, const struct xfs_alloc_rec_incore *irec) { struct xfs_mount *mp = cur->bc_mp; xfs_warn(mp, "%sbt record corruption in AG %d detected at %pS!", cur->bc_ops->name, cur->bc_group->xg_gno, fa); xfs_warn(mp, "start block 0x%x block count 0x%x", irec->ar_startblock, irec->ar_blockcount); xfs_btree_mark_sick(cur); return -EFSCORRUPTED; } /* * Get the data from the pointed-to record. */ int /* error */ xfs_alloc_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t *bno, /* output: starting block of extent */ xfs_extlen_t *len, /* output: length of extent */ int *stat) /* output: success/failure */ { struct xfs_alloc_rec_incore irec; union xfs_btree_rec *rec; xfs_failaddr_t fa; int error; error = xfs_btree_get_rec(cur, &rec, stat); if (error || !(*stat)) return error; xfs_alloc_btrec_to_irec(rec, &irec); fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_alloc_complain_bad_rec(cur, fa, &irec); *bno = irec.ar_startblock; *len = irec.ar_blockcount; return 0; } /* * Compute aligned version of the found extent. * Takes alignment and min length into account. */ STATIC bool xfs_alloc_compute_aligned( xfs_alloc_arg_t *args, /* allocation argument structure */ xfs_agblock_t foundbno, /* starting block in found extent */ xfs_extlen_t foundlen, /* length in found extent */ xfs_agblock_t *resbno, /* result block number */ xfs_extlen_t *reslen, /* result length */ unsigned *busy_gen) { xfs_agblock_t bno = foundbno; xfs_extlen_t len = foundlen; xfs_extlen_t diff; bool busy; /* Trim busy sections out of found extent */ busy = xfs_extent_busy_trim(pag_group(args->pag), args->minlen, args->maxlen, &bno, &len, busy_gen); /* * If we have a largish extent that happens to start before min_agbno, * see if we can shift it into range... */ if (bno < args->min_agbno && bno + len > args->min_agbno) { diff = args->min_agbno - bno; if (len > diff) { bno += diff; len -= diff; } } if (args->alignment > 1 && len >= args->minlen) { xfs_agblock_t aligned_bno = roundup(bno, args->alignment); diff = aligned_bno - bno; *resbno = aligned_bno; *reslen = diff >= len ? 0 : len - diff; } else { *resbno = bno; *reslen = len; } return busy; } /* * Compute best start block and diff for "near" allocations. * freelen >= wantlen already checked by caller. */ STATIC xfs_extlen_t /* difference value (absolute) */ xfs_alloc_compute_diff( xfs_agblock_t wantbno, /* target starting block */ xfs_extlen_t wantlen, /* target length */ xfs_extlen_t alignment, /* target alignment */ int datatype, /* are we allocating data? 
*/ xfs_agblock_t freebno, /* freespace's starting block */ xfs_extlen_t freelen, /* freespace's length */ xfs_agblock_t *newbnop) /* result: best start block from free */ { xfs_agblock_t freeend; /* end of freespace extent */ xfs_agblock_t newbno1; /* return block number */ xfs_agblock_t newbno2; /* other new block number */ xfs_extlen_t newlen1=0; /* length with newbno1 */ xfs_extlen_t newlen2=0; /* length with newbno2 */ xfs_agblock_t wantend; /* end of target extent */ bool userdata = datatype & XFS_ALLOC_USERDATA; ASSERT(freelen >= wantlen); freeend = freebno + freelen; wantend = wantbno + wantlen; /* * We want to allocate from the start of a free extent if it is past * the desired block or if we are allocating user data and the free * extent is before desired block. The second case is there to allow * for contiguous allocation from the remaining free space if the file * grows in the short term. */ if (freebno >= wantbno || (userdata && freeend < wantend)) { if ((newbno1 = roundup(freebno, alignment)) >= freeend) newbno1 = NULLAGBLOCK; } else if (freeend >= wantend && alignment > 1) { newbno1 = roundup(wantbno, alignment); newbno2 = newbno1 - alignment; if (newbno1 >= freeend) newbno1 = NULLAGBLOCK; else newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1); if (newbno2 < freebno) newbno2 = NULLAGBLOCK; else newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2); if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { if (newlen1 < newlen2 || (newlen1 == newlen2 && XFS_ABSDIFF(newbno1, wantbno) > XFS_ABSDIFF(newbno2, wantbno))) newbno1 = newbno2; } else if (newbno2 != NULLAGBLOCK) newbno1 = newbno2; } else if (freeend >= wantend) { newbno1 = wantbno; } else if (alignment > 1) { newbno1 = roundup(freeend - wantlen, alignment); if (newbno1 > freeend - wantlen && newbno1 - alignment >= freebno) newbno1 -= alignment; else if (newbno1 >= freeend) newbno1 = NULLAGBLOCK; } else newbno1 = freeend - wantlen; *newbnop = newbno1; return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); } /* * Fix up the length, based on mod and prod. * len should be k * prod + mod for some k. * If len is too small it is returned unchanged. * If len hits maxlen it is left alone. */ STATIC void xfs_alloc_fix_len( xfs_alloc_arg_t *args) /* allocation argument structure */ { xfs_extlen_t k; xfs_extlen_t rlen; ASSERT(args->mod < args->prod); rlen = args->len; ASSERT(rlen >= args->minlen); ASSERT(rlen <= args->maxlen); if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen || (args->mod == 0 && rlen < args->prod)) return; k = rlen % args->prod; if (k == args->mod) return; if (k > args->mod) rlen = rlen - (k - args->mod); else rlen = rlen - args->prod + (args->mod - k); /* casts to (int) catch length underflows */ if ((int)rlen < (int)args->minlen) return; ASSERT(rlen >= args->minlen && rlen <= args->maxlen); ASSERT(rlen % args->prod == args->mod); ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >= rlen + args->minleft); args->len = rlen; } /* * Determine if the cursor points to the block that contains the right-most * block of records in the by-count btree. This block contains the largest * contiguous free extent in the AG, so if we modify a record in this block we * need to call xfs_alloc_fixup_longest() once the modifications are done to * ensure the agf->agf_longest field is kept up to date with the longest free * extent tracked by the by-count btree. 
*/ static bool xfs_alloc_cursor_at_lastrec( struct xfs_btree_cur *cnt_cur) { struct xfs_btree_block *block; union xfs_btree_ptr ptr; struct xfs_buf *bp; block = xfs_btree_get_block(cnt_cur, 0, &bp); xfs_btree_get_sibling(cnt_cur, block, &ptr, XFS_BB_RIGHTSIB); return xfs_btree_ptr_is_null(cnt_cur, &ptr); } /* * Find the rightmost record of the cntbt, and return the longest free space * recorded in it. Simply set both the block number and the length to their * maximum values before searching. */ static int xfs_cntbt_longest( struct xfs_btree_cur *cnt_cur, xfs_extlen_t *longest) { struct xfs_alloc_rec_incore irec; union xfs_btree_rec *rec; int stat = 0; int error; memset(&cnt_cur->bc_rec, 0xFF, sizeof(cnt_cur->bc_rec)); error = xfs_btree_lookup(cnt_cur, XFS_LOOKUP_LE, &stat); if (error) return error; if (!stat) { /* totally empty tree */ *longest = 0; return 0; } error = xfs_btree_get_rec(cnt_cur, &rec, &stat); if (error) return error; if (XFS_IS_CORRUPT(cnt_cur->bc_mp, !stat)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } xfs_alloc_btrec_to_irec(rec, &irec); *longest = irec.ar_blockcount; return 0; } /* * Update the longest contiguous free extent in the AG from the by-count cursor * that is passed to us. This should be done at the end of any allocation or * freeing operation that touches the longest extent in the btree. * * Needing to update the longest extent can be determined by calling * xfs_alloc_cursor_at_lastrec() after the cursor is positioned for record * modification but before the modification begins. */ static int xfs_alloc_fixup_longest( struct xfs_btree_cur *cnt_cur) { struct xfs_perag *pag = to_perag(cnt_cur->bc_group); struct xfs_buf *bp = cnt_cur->bc_ag.agbp; struct xfs_agf *agf = bp->b_addr; xfs_extlen_t longest = 0; int error; /* Lookup last rec in order to update AGF. */ error = xfs_cntbt_longest(cnt_cur, &longest); if (error) return error; pag->pagf_longest = longest; agf->agf_longest = cpu_to_be32(pag->pagf_longest); xfs_alloc_log_agf(cnt_cur->bc_tp, bp, XFS_AGF_LONGEST); return 0; } /* * Update the two btrees, logically removing from freespace the extent * starting at rbno, rlen blocks. The extent is contained within the * actual (current) free extent fbno for flen blocks. * Flags are passed in indicating whether the cursors are set to the * relevant records. */ STATIC int /* error code */ xfs_alloc_fixup_trees( struct xfs_btree_cur *cnt_cur, /* cursor for by-size btree */ struct xfs_btree_cur *bno_cur, /* cursor for by-block btree */ xfs_agblock_t fbno, /* starting block of free extent */ xfs_extlen_t flen, /* length of free extent */ xfs_agblock_t rbno, /* starting block of returned extent */ xfs_extlen_t rlen, /* length of returned extent */ int flags) /* flags, XFSA_FIXUP_... */ { int error; /* error code */ int i; /* operation results */ xfs_agblock_t nfbno1; /* first new free startblock */ xfs_agblock_t nfbno2; /* second new free startblock */ xfs_extlen_t nflen1=0; /* first new free length */ xfs_extlen_t nflen2=0; /* second new free length */ struct xfs_mount *mp; bool fixup_longest = false; mp = cnt_cur->bc_mp; /* * Look up the record in the by-size tree if necessary. 
*/ if (flags & XFSA_FIXUP_CNT_OK) { #ifdef DEBUG if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1 || nfbno1 != fbno || nflen1 != flen)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } #endif } else { if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } } /* * Look up the record in the by-block tree if necessary. */ if (flags & XFSA_FIXUP_BNO_OK) { #ifdef DEBUG if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1 || nfbno1 != fbno || nflen1 != flen)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } #endif } else { if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } } #ifdef DEBUG if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) { struct xfs_btree_block *bnoblock; struct xfs_btree_block *cntblock; bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_levels[0].bp); cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_levels[0].bp); if (XFS_IS_CORRUPT(mp, bnoblock->bb_numrecs != cntblock->bb_numrecs)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } } #endif /* * Deal with all four cases: the allocated record is contained * within the freespace record, so we can have new freespace * at either (or both) end, or no freespace remaining. */ if (rbno == fbno && rlen == flen) nfbno1 = nfbno2 = NULLAGBLOCK; else if (rbno == fbno) { nfbno1 = rbno + rlen; nflen1 = flen - rlen; nfbno2 = NULLAGBLOCK; } else if (rbno + rlen == fbno + flen) { nfbno1 = fbno; nflen1 = flen - rlen; nfbno2 = NULLAGBLOCK; } else { nfbno1 = fbno; nflen1 = rbno - fbno; nfbno2 = rbno + rlen; nflen2 = (fbno + flen) - nfbno2; } if (xfs_alloc_cursor_at_lastrec(cnt_cur)) fixup_longest = true; /* * Delete the entry from the by-size btree. */ if ((error = xfs_btree_delete(cnt_cur, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } /* * Add new by-size btree entry(s). */ if (nfbno1 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } if ((error = xfs_btree_insert(cnt_cur, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } } if (nfbno2 != NULLAGBLOCK) { if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } if ((error = xfs_btree_insert(cnt_cur, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); return -EFSCORRUPTED; } } /* * Fix up the by-block btree entry(s). */ if (nfbno1 == NULLAGBLOCK) { /* * No remaining freespace, just delete the by-block tree entry. */ if ((error = xfs_btree_delete(bno_cur, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } } else { /* * Update the by-block entry to start later|be shorter. */ if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1))) return error; } if (nfbno2 != NULLAGBLOCK) { /* * 2 resulting free entries, need to add one. 
*/ if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } if ((error = xfs_btree_insert(bno_cur, &i))) return error; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); return -EFSCORRUPTED; } } if (fixup_longest) return xfs_alloc_fixup_longest(cnt_cur); return 0; } /* * We do not verify the AGFL contents against AGF-based index counters here, * even though we may have access to the perag that contains shadow copies. We * don't know if the AGF based counters have been checked, and if they have, they * still may be inconsistent because they haven't yet been reset on the first * allocation after the AGF has been read in. * * This means we can only check that all agfl entries contain valid or null * values because we can't reliably determine the active range to exclude * NULLAGBNO as a valid value. * * However, we can't even do that for v4 format filesystems because there are * old versions of mkfs out there that do not initialise the AGFL to known, * verifiable values. Hence we can't tell the difference between an AGFL block * allocated by mkfs and a corrupted AGFL block here on v4 filesystems. * * As a result, we can only fully validate AGFL block numbers when we pull them * from the freelist in xfs_alloc_get_freelist(). */ static xfs_failaddr_t xfs_agfl_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp); __be32 *agfl_bno = xfs_buf_to_agfl_bno(bp); int i; if (!xfs_has_crc(mp)) return NULL; if (!xfs_verify_magic(bp, agfl->agfl_magicnum)) return __this_address; if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid)) return __this_address; /* * during growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. growfs ensures we can't * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != pag_agno((bp->b_pag))) return __this_address; for (i = 0; i < xfs_agfl_size(mp); i++) { if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK && be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks) return __this_address; } if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn))) return __this_address; return NULL; } static void xfs_agfl_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; xfs_failaddr_t fa; /* * There is no verification of non-crc AGFLs because mkfs does not * initialise the AGFL to zero or NULL. Hence the only valid part of the * AGFL is what the AGF says is active. We can't get to the AGF, so we * can't verify just those entries are valid.
*/ if (!xfs_has_crc(mp)) return; if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) xfs_verifier_error(bp, -EFSBADCRC, __this_address); else { fa = xfs_agfl_verify(bp); if (fa) xfs_verifier_error(bp, -EFSCORRUPTED, fa); } } static void xfs_agfl_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; struct xfs_buf_log_item *bip = bp->b_log_item; xfs_failaddr_t fa; /* no verification of non-crc AGFLs */ if (!xfs_has_crc(mp)) return; fa = xfs_agfl_verify(bp); if (fa) { xfs_verifier_error(bp, -EFSCORRUPTED, fa); return; } if (bip) XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF); } const struct xfs_buf_ops xfs_agfl_buf_ops = { .name = "xfs_agfl", .magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) }, .verify_read = xfs_agfl_read_verify, .verify_write = xfs_agfl_write_verify, .verify_struct = xfs_agfl_verify, }; /* * Read in the allocation group free block array. */ int xfs_alloc_read_agfl( struct xfs_perag *pag, struct xfs_trans *tp, struct xfs_buf **bpp) { struct xfs_mount *mp = pag_mount(pag); struct xfs_buf *bp; int error; error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGFL_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops); if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); if (error) return error; xfs_buf_set_ref(bp, XFS_AGFL_REF); *bpp = bp; return 0; } STATIC int xfs_alloc_update_counters( struct xfs_trans *tp, struct xfs_buf *agbp, long len) { struct xfs_agf *agf = agbp->b_addr; agbp->b_pag->pagf_freeblks += len; be32_add_cpu(&agf->agf_freeblks, len); if (unlikely(be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))) { xfs_buf_mark_corrupt(agbp); xfs_ag_mark_sick(agbp->b_pag, XFS_SICK_AG_AGF); return -EFSCORRUPTED; } xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); return 0; } /* * Block allocation algorithm and data structures. */ struct xfs_alloc_cur { struct xfs_btree_cur *cnt; /* btree cursors */ struct xfs_btree_cur *bnolt; struct xfs_btree_cur *bnogt; xfs_extlen_t cur_len;/* current search length */ xfs_agblock_t rec_bno;/* extent startblock */ xfs_extlen_t rec_len;/* extent length */ xfs_agblock_t bno; /* alloc bno */ xfs_extlen_t len; /* alloc len */ xfs_extlen_t diff; /* diff from search bno */ unsigned int busy_gen;/* busy state */ bool busy; }; /* * Set up cursors, etc. in the extent allocation cursor. This function can be * called multiple times to reset an initialized structure without having to * reallocate cursors. */ static int xfs_alloc_cur_setup( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur) { int error; int i; acur->cur_len = args->maxlen; acur->rec_bno = 0; acur->rec_len = 0; acur->bno = 0; acur->len = 0; acur->diff = -1; acur->busy = false; acur->busy_gen = 0; /* * Perform an initial cntbt lookup to check for availability of maxlen * extents. If this fails, we'll return -ENOSPC to signal the caller to * attempt a small allocation. */ if (!acur->cnt) acur->cnt = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp, args->pag); error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i); if (error) return error; /* * Allocate the bnobt left and right search cursors. */ if (!acur->bnolt) acur->bnolt = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp, args->pag); if (!acur->bnogt) acur->bnogt = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp, args->pag); return i == 1 ? 
0 : -ENOSPC; } static void xfs_alloc_cur_close( struct xfs_alloc_cur *acur, bool error) { int cur_error = XFS_BTREE_NOERROR; if (error) cur_error = XFS_BTREE_ERROR; if (acur->cnt) xfs_btree_del_cursor(acur->cnt, cur_error); if (acur->bnolt) xfs_btree_del_cursor(acur->bnolt, cur_error); if (acur->bnogt) xfs_btree_del_cursor(acur->bnogt, cur_error); acur->cnt = acur->bnolt = acur->bnogt = NULL; } /* * Check an extent for allocation and track the best available candidate in the * allocation structure. The cursor is deactivated if it has entered an out of * range state based on allocation arguments. Optionally return the extent * extent geometry and allocation status if requested by the caller. */ static int xfs_alloc_cur_check( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur, struct xfs_btree_cur *cur, int *new) { int error, i; xfs_agblock_t bno, bnoa, bnew; xfs_extlen_t len, lena, diff = -1; bool busy; unsigned busy_gen = 0; bool deactivate = false; bool isbnobt = xfs_btree_is_bno(cur->bc_ops); *new = 0; error = xfs_alloc_get_rec(cur, &bno, &len, &i); if (error) return error; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(cur); return -EFSCORRUPTED; } /* * Check minlen and deactivate a cntbt cursor if out of acceptable size * range (i.e., walking backwards looking for a minlen extent). */ if (len < args->minlen) { deactivate = !isbnobt; goto out; } busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena, &busy_gen); acur->busy |= busy; if (busy) acur->busy_gen = busy_gen; /* deactivate a bnobt cursor outside of locality range */ if (bnoa < args->min_agbno || bnoa > args->max_agbno) { deactivate = isbnobt; goto out; } if (lena < args->minlen) goto out; args->len = XFS_EXTLEN_MIN(lena, args->maxlen); xfs_alloc_fix_len(args); ASSERT(args->len >= args->minlen); if (args->len < acur->len) goto out; /* * We have an aligned record that satisfies minlen and beats or matches * the candidate extent size. Compare locality for near allocation mode. */ diff = xfs_alloc_compute_diff(args->agbno, args->len, args->alignment, args->datatype, bnoa, lena, &bnew); if (bnew == NULLAGBLOCK) goto out; /* * Deactivate a bnobt cursor with worse locality than the current best. */ if (diff > acur->diff) { deactivate = isbnobt; goto out; } ASSERT(args->len > acur->len || (args->len == acur->len && diff <= acur->diff)); acur->rec_bno = bno; acur->rec_len = len; acur->bno = bnew; acur->len = args->len; acur->diff = diff; *new = 1; /* * We're done if we found a perfect allocation. This only deactivates * the current cursor, but this is just an optimization to terminate a * cntbt search that otherwise runs to the edge of the tree. */ if (acur->diff == 0 && acur->len == args->maxlen) deactivate = true; out: if (deactivate) cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE; trace_xfs_alloc_cur_check(cur, bno, len, diff, *new); return 0; } /* * Complete an allocation of a candidate extent. Remove the extent from both * trees and update the args structure. 
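 *
 * For reference, the acceptance rule applied by xfs_alloc_cur_check() above
 * reduces to the sketch below (editor's illustration; the best candidate
 * starts out with length 0 and the maximum possible diff, and minlen, busy
 * trimming and cursor deactivation are handled before this comparison):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool candidate_is_better(uint32_t best_len, uint64_t best_diff,
 *			uint32_t len, uint64_t diff)
 *	{
 *		// a new extent only replaces the current best if it is at
 *		// least as long and at least as close to the locality hint
 *		return len >= best_len && diff <= best_diff;
 *	}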
*/ STATIC int xfs_alloc_cur_finish( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur) { int error; ASSERT(acur->cnt && acur->bnolt); ASSERT(acur->bno >= acur->rec_bno); ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len); ASSERT(xfs_verify_agbext(args->pag, acur->rec_bno, acur->rec_len)); error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno, acur->rec_len, acur->bno, acur->len, 0); if (error) return error; args->agbno = acur->bno; args->len = acur->len; args->wasfromfl = 0; trace_xfs_alloc_cur(args); return 0; } /* * Locality allocation lookup algorithm. This expects a cntbt cursor and uses * bno optimized lookup to search for extents with ideal size and locality. */ STATIC int xfs_alloc_cntbt_iter( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur) { struct xfs_btree_cur *cur = acur->cnt; xfs_agblock_t bno; xfs_extlen_t len, cur_len; int error; int i; if (!xfs_alloc_cur_active(cur)) return 0; /* locality optimized lookup */ cur_len = acur->cur_len; error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i); if (error) return error; if (i == 0) return 0; error = xfs_alloc_get_rec(cur, &bno, &len, &i); if (error) return error; /* check the current record and update search length from it */ error = xfs_alloc_cur_check(args, acur, cur, &i); if (error) return error; ASSERT(len >= acur->cur_len); acur->cur_len = len; /* * We looked up the first record >= [agbno, len] above. The agbno is a * secondary key and so the current record may lie just before or after * agbno. If it is past agbno, check the previous record too so long as * the length matches as it may be closer. Don't check a smaller record * because that could deactivate our cursor. */ if (bno > args->agbno) { error = xfs_btree_decrement(cur, 0, &i); if (!error && i) { error = xfs_alloc_get_rec(cur, &bno, &len, &i); if (!error && i && len == acur->cur_len) error = xfs_alloc_cur_check(args, acur, cur, &i); } if (error) return error; } /* * Increment the search key until we find at least one allocation * candidate or if the extent we found was larger. Otherwise, double the * search key to optimize the search. Efficiency is more important here * than absolute best locality. */ cur_len <<= 1; if (!acur->len || acur->cur_len >= cur_len) acur->cur_len++; else acur->cur_len = cur_len; return error; } /* * Deal with the case where only small freespaces remain. Either return the * contents of the last freespace record, or allocate space from the freelist if * there is nothing in the tree. */ STATIC int /* error */ xfs_alloc_ag_vextent_small( struct xfs_alloc_arg *args, /* allocation argument structure */ struct xfs_btree_cur *ccur, /* optional by-size cursor */ xfs_agblock_t *fbnop, /* result block number */ xfs_extlen_t *flenp, /* result length */ int *stat) /* status: 0-freelist, 1-normal/none */ { struct xfs_agf *agf = args->agbp->b_addr; int error = 0; xfs_agblock_t fbno = NULLAGBLOCK; xfs_extlen_t flen = 0; int i = 0; /* * If a cntbt cursor is provided, try to allocate the largest record in * the tree. Try the AGFL if the cntbt is empty, otherwise fail the * allocation. Make sure to respect minleft even when pulling from the * freelist. 
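 *
 * The freelist fallback is only taken for the simplest possible request.
 * As an editor's sketch (the parameter names are simplified stand-ins for
 * the xfs_alloc_arg and AGF fields used below), the gating test is:
 *
 *	#include <stdbool.h>
 *
 *	static bool can_use_agfl_block(unsigned int minlen,
 *			unsigned int alignment, bool is_agfl_resv,
 *			unsigned int flcount, unsigned int minleft)
 *	{
 *		// only a single, unaligned block may come from the AGFL, and
 *		// only while enough freelist entries remain to cover minleft
 *		return minlen == 1 && alignment == 1 && !is_agfl_resv &&
 *		       flcount > minleft;
 *	}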
*/ if (ccur) error = xfs_btree_decrement(ccur, 0, &i); if (error) goto error; if (i) { error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i); if (error) goto error; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(ccur); error = -EFSCORRUPTED; goto error; } goto out; } if (args->minlen != 1 || args->alignment != 1 || args->resv == XFS_AG_RESV_AGFL || be32_to_cpu(agf->agf_flcount) <= args->minleft) goto out; error = xfs_alloc_get_freelist(args->pag, args->tp, args->agbp, &fbno, 0); if (error) goto error; if (fbno == NULLAGBLOCK) goto out; xfs_extent_busy_reuse(pag_group(args->pag), fbno, 1, (args->datatype & XFS_ALLOC_NOBUSY)); if (args->datatype & XFS_ALLOC_USERDATA) { struct xfs_buf *bp; error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp, xfs_agbno_to_daddr(args->pag, fbno), args->mp->m_bsize, 0, &bp); if (error) goto error; xfs_trans_binval(args->tp, bp); } *fbnop = args->agbno = fbno; *flenp = args->len = 1; if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) { xfs_btree_mark_sick(ccur); error = -EFSCORRUPTED; goto error; } args->wasfromfl = 1; trace_xfs_alloc_small_freelist(args); /* * If we're feeding an AGFL block to something that doesn't live in the * free space, we need to clear out the OWN_AG rmap. */ error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1, &XFS_RMAP_OINFO_AG); if (error) goto error; *stat = 0; return 0; out: /* * Can't do the allocation, give up. */ if (flen < args->minlen) { args->agbno = NULLAGBLOCK; trace_xfs_alloc_small_notenough(args); flen = 0; } *fbnop = fbno; *flenp = flen; *stat = 1; trace_xfs_alloc_small_done(args); return 0; error: trace_xfs_alloc_small_error(args); return error; } /* * Allocate a variable extent at exactly agno/bno. * Extent's length (returned in *len) will be between minlen and maxlen, * and of the form k * prod + mod unless there's nothing that large. * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it. */ STATIC int /* error */ xfs_alloc_ag_vextent_exact( xfs_alloc_arg_t *args) /* allocation argument structure */ { struct xfs_btree_cur *bno_cur;/* by block-number btree cursor */ struct xfs_btree_cur *cnt_cur;/* by count btree cursor */ int error; xfs_agblock_t fbno; /* start block of found extent */ xfs_extlen_t flen; /* length of found extent */ xfs_agblock_t tbno; /* start block of busy extent */ xfs_extlen_t tlen; /* length of busy extent */ xfs_agblock_t tend; /* end block of busy extent */ int i; /* success/failure of operation */ unsigned busy_gen; ASSERT(args->alignment == 1); /* * Allocate/initialize a cursor for the by-number freespace btree. */ bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp, args->pag); /* * Lookup bno and minlen in the btree (minlen is irrelevant, really). * Look for the closest free block <= bno, it must contain bno * if any free block does. */ error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i); if (error) goto error0; if (!i) goto not_found; /* * Grab the freespace record. */ error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i); if (error) goto error0; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } ASSERT(fbno <= args->agbno); /* * Check for overlapping busy extents. */ tbno = fbno; tlen = flen; xfs_extent_busy_trim(pag_group(args->pag), args->minlen, args->maxlen, &tbno, &tlen, &busy_gen); /* * Give up if the start of the extent is busy, or the freespace isn't * long enough for the minimum request. 
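 *
 * As an editor's sketch, the fit test and length computation below (before
 * the prod/mod adjustment done by xfs_alloc_fix_len()) are equivalent to:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool exact_fit(uint32_t tbno, uint32_t tlen, uint32_t agbno,
 *			uint32_t minlen, uint32_t maxlen, uint32_t *lenp)
 *	{
 *		uint32_t tend = tbno + tlen;
 *
 *		if (tbno > agbno || tlen < minlen || tend < agbno + minlen)
 *			return false;
 *		// take up to maxlen blocks, but never run past the free extent
 *		*lenp = (tend < agbno + maxlen ? tend : agbno + maxlen) - agbno;
 *		return true;
 *	}
 *
 * where tbno/tlen describe the busy-trimmed freespace record and agbno is
 * the exact start the caller asked for.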
*/ if (tbno > args->agbno) goto not_found; if (tlen < args->minlen) goto not_found; tend = tbno + tlen; if (tend < args->agbno + args->minlen) goto not_found; /* * End of extent will be smaller of the freespace end and the * maximal requested end. * * Fix the length according to mod and prod if given. */ args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) - args->agbno; xfs_alloc_fix_len(args); ASSERT(args->agbno + args->len <= tend); /* * We are allocating agbno for args->len * Allocate/initialize a cursor for the by-size btree. */ cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp, args->pag); ASSERT(xfs_verify_agbext(args->pag, args->agbno, args->len)); error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno, args->len, XFSA_FIXUP_BNO_OK); if (error) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); goto error0; } xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); args->wasfromfl = 0; trace_xfs_alloc_exact_done(args); return 0; not_found: /* Didn't find it, return null. */ xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); args->agbno = NULLAGBLOCK; trace_xfs_alloc_exact_notfound(args); return 0; error0: xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); trace_xfs_alloc_exact_error(args); return error; } /* * Search a given number of btree records in a given direction. Check each * record against the good extent we've already found. */ STATIC int xfs_alloc_walk_iter( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur, struct xfs_btree_cur *cur, bool increment, bool find_one, /* quit on first candidate */ int count, /* rec count (-1 for infinite) */ int *stat) { int error; int i; *stat = 0; /* * Search so long as the cursor is active or we find a better extent. * The cursor is deactivated if it extends beyond the range of the * current allocation candidate. */ while (xfs_alloc_cur_active(cur) && count) { error = xfs_alloc_cur_check(args, acur, cur, &i); if (error) return error; if (i == 1) { *stat = 1; if (find_one) break; } if (!xfs_alloc_cur_active(cur)) break; if (increment) error = xfs_btree_increment(cur, 0, &i); else error = xfs_btree_decrement(cur, 0, &i); if (error) return error; if (i == 0) cur->bc_flags &= ~XFS_BTREE_ALLOCBT_ACTIVE; if (count > 0) count--; } return 0; } /* * Search the by-bno and by-size btrees in parallel in search of an extent with * ideal locality based on the NEAR mode ->agbno locality hint. */ STATIC int xfs_alloc_ag_vextent_locality( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur, int *stat) { struct xfs_btree_cur *fbcur = NULL; int error; int i; bool fbinc; ASSERT(acur->len == 0); *stat = 0; error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i); if (error) return error; error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i); if (error) return error; error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i); if (error) return error; /* * Search the bnobt and cntbt in parallel. Search the bnobt left and * right and lookup the closest extent to the locality hint for each * extent size key in the cntbt. The entire search terminates * immediately on a bnobt hit because that means we've found best case * locality. Otherwise the search continues until the cntbt cursor runs * off the end of the tree. If no allocation candidate is found at this * point, give up on locality, walk backwards from the end of the cntbt * and take the first available extent. 
* * The parallel tree searches balance each other out to provide fairly * consistent performance for various situations. The bnobt search can * have pathological behavior in the worst case scenario of larger * allocation requests and fragmented free space. On the other hand, the * bnobt is able to satisfy most smaller allocation requests much more * quickly than the cntbt. The cntbt search can sift through fragmented * free space and sets of free extents for larger allocation requests * more quickly than the bnobt. Since the locality hint is just a hint * and we don't want to scan the entire bnobt for perfect locality, the * cntbt search essentially bounds the bnobt search such that we can * find good enough locality at reasonable performance in most cases. */ while (xfs_alloc_cur_active(acur->bnolt) || xfs_alloc_cur_active(acur->bnogt) || xfs_alloc_cur_active(acur->cnt)) { trace_xfs_alloc_cur_lookup(args); /* * Search the bnobt left and right. In the case of a hit, finish * the search in the opposite direction and we're done. */ error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false, true, 1, &i); if (error) return error; if (i == 1) { trace_xfs_alloc_cur_left(args); fbcur = acur->bnogt; fbinc = true; break; } error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true, 1, &i); if (error) return error; if (i == 1) { trace_xfs_alloc_cur_right(args); fbcur = acur->bnolt; fbinc = false; break; } /* * Check the extent with best locality based on the current * extent size search key and keep track of the best candidate. */ error = xfs_alloc_cntbt_iter(args, acur); if (error) return error; if (!xfs_alloc_cur_active(acur->cnt)) { trace_xfs_alloc_cur_lookup_done(args); break; } } /* * If we failed to find anything due to busy extents, return empty * handed so the caller can flush and retry. If no busy extents were * found, walk backwards from the end of the cntbt as a last resort. */ if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) { error = xfs_btree_decrement(acur->cnt, 0, &i); if (error) return error; if (i) { acur->cnt->bc_flags |= XFS_BTREE_ALLOCBT_ACTIVE; fbcur = acur->cnt; fbinc = false; } } /* * Search in the opposite direction for a better entry in the case of * a bnobt hit or walk backwards from the end of the cntbt. */ if (fbcur) { error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1, &i); if (error) return error; } if (acur->len) *stat = 1; return 0; } /* Check the last block of the cnt btree for allocations. */ static int xfs_alloc_ag_vextent_lastblock( struct xfs_alloc_arg *args, struct xfs_alloc_cur *acur, xfs_agblock_t *bno, xfs_extlen_t *len, bool *allocated) { int error; int i; #ifdef DEBUG /* Randomly don't execute the first algorithm. */ if (get_random_u32_below(2)) return 0; #endif /* * Start from the entry that lookup found, sequence through all larger * free blocks. If we're actually pointing at a record smaller than * maxlen, go to the start of this block, and skip all those smaller * than minlen. */ if (*len || args->alignment > 1) { acur->cnt->bc_levels[0].ptr = 1; do { error = xfs_alloc_get_rec(acur->cnt, bno, len, &i); if (error) return error; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(acur->cnt); return -EFSCORRUPTED; } if (*len >= args->minlen) break; error = xfs_btree_increment(acur->cnt, 0, &i); if (error) return error; } while (i); ASSERT(*len >= args->minlen); if (!i) return 0; } error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i); if (error) return error; /* * It didn't work. 
We COULD be in a case where there's a good record * somewhere, so try again. */ if (acur->len == 0) return 0; trace_xfs_alloc_near_first(args); *allocated = true; return 0; } /* * Allocate a variable extent near bno in the allocation group agno. * Extent's length (returned in len) will be between minlen and maxlen, * and of the form k * prod + mod unless there's nothing that large. * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. */ STATIC int xfs_alloc_ag_vextent_near( struct xfs_alloc_arg *args, uint32_t alloc_flags) { struct xfs_alloc_cur acur = {}; int error; /* error code */ int i; /* result code, temporary */ xfs_agblock_t bno; xfs_extlen_t len; /* handle uninitialized agbno range so caller doesn't have to */ if (!args->min_agbno && !args->max_agbno) args->max_agbno = args->mp->m_sb.sb_agblocks - 1; ASSERT(args->min_agbno <= args->max_agbno); /* clamp agbno to the range if it's outside */ if (args->agbno < args->min_agbno) args->agbno = args->min_agbno; if (args->agbno > args->max_agbno) args->agbno = args->max_agbno; /* Retry once quickly if we find busy extents before blocking. */ alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH; restart: len = 0; /* * Set up cursors and see if there are any free extents as big as * maxlen. If not, pick the last entry in the tree unless the tree is * empty. */ error = xfs_alloc_cur_setup(args, &acur); if (error == -ENOSPC) { error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno, &len, &i); if (error) goto out; if (i == 0 || len == 0) { trace_xfs_alloc_near_noentry(args); goto out; } ASSERT(i == 1); } else if (error) { goto out; } /* * First algorithm. * If the requested extent is large wrt the freespaces available * in this a.g., then the cursor will be pointing to a btree entry * near the right edge of the tree. If it's in the last btree leaf * block, then we just examine all the entries in that block * that are big enough, and pick the best one. */ if (xfs_btree_islastblock(acur.cnt, 0)) { bool allocated = false; error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len, &allocated); if (error) goto out; if (allocated) goto alloc_finish; } /* * Second algorithm. Combined cntbt and bnobt search to find ideal * locality. */ error = xfs_alloc_ag_vextent_locality(args, &acur, &i); if (error) goto out; /* * If we couldn't get anything, give up. */ if (!acur.len) { if (acur.busy) { /* * Our only valid extents must have been busy. Flush and * retry the allocation again. If we get an -EAGAIN * error, we're being told that a deadlock was avoided * and the current transaction needs committing before * the allocation can be retried. */ trace_xfs_alloc_near_busy(args); error = xfs_extent_busy_flush(args->tp, pag_group(args->pag), acur.busy_gen, alloc_flags); if (error) goto out; alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH; goto restart; } trace_xfs_alloc_size_neither(args); args->agbno = NULLAGBLOCK; goto out; } alloc_finish: /* fix up btrees on a successful allocation */ error = xfs_alloc_cur_finish(args, &acur); out: xfs_alloc_cur_close(&acur, error); return error; } /* * Allocate a variable extent anywhere in the allocation group agno. * Extent's length (returned in len) will be between minlen and maxlen, * and of the form k * prod + mod unless there's nothing that large. * Return the starting a.g. block, or NULLAGBLOCK if we can't do it. 
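 *
 * As a rough illustration of the "k * prod + mod" rule (an editor's
 * approximation, not a transcription of xfs_alloc_fix_len()), the length is
 * trimmed down to the largest value of that form that still satisfies
 * minlen, and left alone if no such value exists:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t fix_len(uint32_t len, uint32_t minlen,
 *			uint32_t prod, uint32_t mod)
 *	{
 *		uint32_t k, adj;
 *
 *		if (prod <= 1)
 *			return len;
 *		k = len % prod;
 *		if (k == mod)
 *			return len;
 *		// distance down to the next length congruent to mod
 *		adj = (k > mod) ? k - mod : prod + k - mod;
 *		if (adj < len && len - adj >= minlen)
 *			return len - adj;
 *		return len;
 *	}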
*/ static int xfs_alloc_ag_vextent_size( struct xfs_alloc_arg *args, uint32_t alloc_flags) { struct xfs_agf *agf = args->agbp->b_addr; struct xfs_btree_cur *bno_cur; struct xfs_btree_cur *cnt_cur; xfs_agblock_t fbno; /* start of found freespace */ xfs_extlen_t flen; /* length of found freespace */ xfs_agblock_t rbno; /* returned block number */ xfs_extlen_t rlen; /* length of returned extent */ bool busy; unsigned busy_gen; int error; int i; /* Retry once quickly if we find busy extents before blocking. */ alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH; restart: /* * Allocate and initialize a cursor for the by-size btree. */ cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, args->agbp, args->pag); bno_cur = NULL; /* * Look for an entry >= maxlen+alignment-1 blocks. */ if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen + args->alignment - 1, &i))) goto error0; /* * If none then we have to settle for a smaller extent. In the case that * there are no large extents, this will return the last entry in the * tree unless the tree is empty. In the case that there are only busy * large extents, this will return the largest small extent unless there * are no smaller extents available. */ if (!i) { error = xfs_alloc_ag_vextent_small(args, cnt_cur, &fbno, &flen, &i); if (error) goto error0; if (i == 0 || flen == 0) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); trace_xfs_alloc_size_noentry(args); return 0; } ASSERT(i == 1); busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen, &busy_gen); } else { /* * Search for a non-busy extent that is large enough. */ for (;;) { error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i); if (error) goto error0; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen, &busy_gen); if (rlen >= args->maxlen) break; error = xfs_btree_increment(cnt_cur, 0, &i); if (error) goto error0; if (i) continue; /* * Our only valid extents must have been busy. Flush and * retry the allocation again. If we get an -EAGAIN * error, we're being told that a deadlock was avoided * and the current transaction needs committing before * the allocation can be retried. */ trace_xfs_alloc_size_busy(args); error = xfs_extent_busy_flush(args->tp, pag_group(args->pag), busy_gen, alloc_flags); if (error) goto error0; alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH; xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); goto restart; } } /* * In the first case above, we got the last entry in the * by-size btree. Now we check to see if the space hits maxlen * once aligned; if not, we search left for something better. * This can't happen in the second case above. 
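 *
 * The lookup above asks for maxlen + alignment - 1 blocks because any free
 * extent at least that long must contain an aligned run of maxlen blocks.
 * As an editor's sketch of the alignment arithmetic (busy-extent trimming
 * ignored, so this is not a substitute for xfs_alloc_compute_aligned()):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t aligned_len(uint32_t bno, uint32_t len, uint32_t align,
 *			uint32_t *aligned_bno)
 *	{
 *		uint32_t abno = ((bno + align - 1) / align) * align;
 *
 *		*aligned_bno = abno;
 *		// whatever is lost rounding the start up comes off the length
 *		return (abno - bno < len) ? len - (abno - bno) : 0;
 *	}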
*/ rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); if (XFS_IS_CORRUPT(args->mp, rlen != 0 && (rlen > flen || rbno + rlen > fbno + flen))) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if (rlen < args->maxlen) { xfs_agblock_t bestfbno; xfs_extlen_t bestflen; xfs_agblock_t bestrbno; xfs_extlen_t bestrlen; bestrlen = rlen; bestrbno = rbno; bestflen = flen; bestfbno = fbno; for (;;) { if ((error = xfs_btree_decrement(cnt_cur, 0, &i))) goto error0; if (i == 0) break; if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i))) goto error0; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if (flen <= bestrlen) break; busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen, &busy_gen); rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); if (XFS_IS_CORRUPT(args->mp, rlen != 0 && (rlen > flen || rbno + rlen > fbno + flen))) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if (rlen > bestrlen) { bestrlen = rlen; bestrbno = rbno; bestflen = flen; bestfbno = fbno; if (rlen == args->maxlen) break; } } if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen, &i))) goto error0; if (XFS_IS_CORRUPT(args->mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } rlen = bestrlen; rbno = bestrbno; flen = bestflen; fbno = bestfbno; } args->wasfromfl = 0; /* * Fix up the length. */ args->len = rlen; if (rlen < args->minlen) { if (busy) { /* * Our only valid extents must have been busy. Flush and * retry the allocation again. If we get an -EAGAIN * error, we're being told that a deadlock was avoided * and the current transaction needs committing before * the allocation can be retried. */ trace_xfs_alloc_size_busy(args); error = xfs_extent_busy_flush(args->tp, pag_group(args->pag), busy_gen, alloc_flags); if (error) goto error0; alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH; xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); goto restart; } goto out_nominleft; } xfs_alloc_fix_len(args); rlen = args->len; if (XFS_IS_CORRUPT(args->mp, rlen > flen)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } /* * Allocate and initialize a cursor for the by-block tree. */ bno_cur = xfs_bnobt_init_cursor(args->mp, args->tp, args->agbp, args->pag); if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, rbno, rlen, XFSA_FIXUP_CNT_OK))) goto error0; xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); cnt_cur = bno_cur = NULL; args->len = rlen; args->agbno = rbno; if (XFS_IS_CORRUPT(args->mp, args->agbno + args->len > be32_to_cpu(agf->agf_length))) { xfs_ag_mark_sick(args->pag, XFS_SICK_AG_BNOBT); error = -EFSCORRUPTED; goto error0; } trace_xfs_alloc_size_done(args); return 0; error0: trace_xfs_alloc_size_error(args); if (cnt_cur) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); if (bno_cur) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); return error; out_nominleft: xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); trace_xfs_alloc_size_nominleft(args); args->agbno = NULLAGBLOCK; return 0; } /* * Free the extent starting at agno/bno for length. 
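 *
 * The four merge cases handled below collapse to the following sketch of
 * the resulting by-block record (editor's illustration; the by-size entries
 * and, when merging right, the right neighbour's by-block record are
 * deleted or reinserted to match):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static void merged_record(bool haveleft, bool haveright,
 *			uint32_t bno, uint32_t len,
 *			uint32_t ltbno, uint32_t ltlen, uint32_t gtlen,
 *			uint32_t *nbno, uint32_t *nlen)
 *	{
 *		// start at the left neighbour if it touches us, and absorb
 *		// the length of every neighbour that does
 *		*nbno = haveleft ? ltbno : bno;
 *		*nlen = len + (haveleft ? ltlen : 0) + (haveright ? gtlen : 0);
 *	}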
*/ int xfs_free_ag_extent( struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agblock_t bno, xfs_extlen_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { struct xfs_mount *mp; struct xfs_btree_cur *bno_cur; struct xfs_btree_cur *cnt_cur; xfs_agblock_t gtbno; /* start of right neighbor */ xfs_extlen_t gtlen; /* length of right neighbor */ xfs_agblock_t ltbno; /* start of left neighbor */ xfs_extlen_t ltlen; /* length of left neighbor */ xfs_agblock_t nbno; /* new starting block of freesp */ xfs_extlen_t nlen; /* new length of freespace */ int haveleft; /* have a left neighbor */ int haveright; /* have a right neighbor */ int i; int error; struct xfs_perag *pag = agbp->b_pag; bool fixup_longest = false; bno_cur = cnt_cur = NULL; mp = tp->t_mountp; if (!xfs_rmap_should_skip_owner_update(oinfo)) { error = xfs_rmap_free(tp, agbp, pag, bno, len, oinfo); if (error) goto error0; } /* * Allocate and initialize a cursor for the by-block btree. */ bno_cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag); /* * Look for a neighboring block on the left (lower block numbers) * that is contiguous with this space. */ if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft))) goto error0; if (haveleft) { /* * There is a block to our left. */ if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } /* * It's not contiguous, though. */ if (ltbno + ltlen < bno) haveleft = 0; else { /* * If this failure happens the request to free this * space was invalid, it's (partly) already free. * Very bad. */ if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } } } /* * Look for a neighboring block on the right (higher block numbers) * that is contiguous with this space. */ if ((error = xfs_btree_increment(bno_cur, 0, &haveright))) goto error0; if (haveright) { /* * There is a block to our right. */ if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } /* * It's not contiguous, though. */ if (bno + len < gtbno) haveright = 0; else { /* * If this failure happens the request to free this * space was invalid, it's (partly) already free. * Very bad. */ if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } } } /* * Now allocate and initialize a cursor for the by-size tree. */ cnt_cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag); /* * Have both left and right contiguous neighbors. * Merge all three into a single free block. */ if (haveleft && haveright) { /* * Delete the old by-size entry on the left. */ if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } /* * Delete the old by-size entry on the right. 
*/ if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } /* * Delete the old by-block entry for the right block. */ if ((error = xfs_btree_delete(bno_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } /* * Move the by-block cursor back to the left neighbor. */ if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } #ifdef DEBUG /* * Check that this is the right record: delete didn't * mangle the cursor. */ { xfs_agblock_t xxbno; xfs_extlen_t xxlen; if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1 || xxbno != ltbno || xxlen != ltlen)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } } #endif /* * Update remaining by-block entry to the new, joined block. */ nbno = ltbno; nlen = len + ltlen + gtlen; if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) goto error0; } /* * Have only a left contiguous neighbor. * Merge it together with the new freespace. */ else if (haveleft) { /* * Delete the old by-size entry on the left. */ if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } /* * Back up the by-block cursor to the left neighbor, and * update its length. */ if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } nbno = ltbno; nlen = len + ltlen; if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) goto error0; } /* * Have only a right contiguous neighbor. * Merge it together with the new freespace. */ else if (haveright) { /* * Delete the old by-size entry on the right. */ if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } /* * Update the starting block and length of the right * neighbor in the by-block tree. */ nbno = bno; nlen = len + gtlen; if ((error = xfs_alloc_update(bno_cur, nbno, nlen))) goto error0; } /* * No contiguous neighbors. * Insert the new freespace into the by-block tree. */ else { nbno = bno; nlen = len; if ((error = xfs_btree_insert(bno_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(bno_cur); error = -EFSCORRUPTED; goto error0; } } xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); bno_cur = NULL; /* * In all cases we need to insert the new freespace in the by-size tree. * * If this new freespace is being inserted in the block that contains * the largest free space in the btree, make sure we also fix up the * agf->agf-longest tracker field. 
*/ if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 0)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if (xfs_alloc_cursor_at_lastrec(cnt_cur)) fixup_longest = true; if ((error = xfs_btree_insert(cnt_cur, &i))) goto error0; if (XFS_IS_CORRUPT(mp, i != 1)) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto error0; } if (fixup_longest) { error = xfs_alloc_fixup_longest(cnt_cur); if (error) goto error0; } xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); cnt_cur = NULL; /* * Update the freespace totals in the ag and superblock. */ error = xfs_alloc_update_counters(tp, agbp, len); xfs_ag_resv_free_extent(pag, type, tp, len); if (error) goto error0; XFS_STATS_INC(mp, xs_freex); XFS_STATS_ADD(mp, xs_freeb, len); trace_xfs_free_extent(pag, bno, len, type, haveleft, haveright); return 0; error0: trace_xfs_free_extent(pag, bno, len, type, -1, -1); if (bno_cur) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR); if (cnt_cur) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); return error; } /* * Visible (exported) allocation/free functions. * Some of these are used just by xfs_alloc_btree.c and this file. */ /* * Compute and fill in value of m_alloc_maxlevels. */ void xfs_alloc_compute_maxlevels( xfs_mount_t *mp) /* file system mount structure */ { mp->m_alloc_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr, (mp->m_sb.sb_agblocks + 1) / 2); ASSERT(mp->m_alloc_maxlevels <= xfs_allocbt_maxlevels_ondisk()); } /* * Find the length of the longest extent in an AG. The 'need' parameter * specifies how much space we're going to need for the AGFL and the * 'reserved' parameter tells us how many blocks in this AG are reserved for * other callers. */ xfs_extlen_t xfs_alloc_longest_free_extent( struct xfs_perag *pag, xfs_extlen_t need, xfs_extlen_t reserved) { xfs_extlen_t delta = 0; /* * If the AGFL needs a recharge, we'll have to subtract that from the * longest extent. */ if (need > pag->pagf_flcount) delta = need - pag->pagf_flcount; /* * If we cannot maintain others' reservations with space from the * not-longest freesp extents, we'll have to subtract /that/ from * the longest extent too. */ if (pag->pagf_freeblks - pag->pagf_longest < reserved) delta += reserved - (pag->pagf_freeblks - pag->pagf_longest); /* * If the longest extent is long enough to satisfy all the * reservations and AGFL rules in place, we can return this extent. */ if (pag->pagf_longest > delta) return min_t(xfs_extlen_t, pag_mount(pag)->m_ag_max_usable, pag->pagf_longest - delta); /* Otherwise, let the caller try for 1 block if there's space. */ return pag->pagf_flcount > 0 || pag->pagf_longest > 0; } /* * Compute the minimum length of the AGFL in the given AG. If @pag is NULL, * return the largest possible minimum length. */ unsigned int xfs_alloc_min_freelist( struct xfs_mount *mp, struct xfs_perag *pag) { /* AG btrees have at least 1 level. */ const unsigned int bno_level = pag ? pag->pagf_bno_level : 1; const unsigned int cnt_level = pag ? pag->pagf_cnt_level : 1; const unsigned int rmap_level = pag ? pag->pagf_rmap_level : 1; unsigned int min_free; ASSERT(mp->m_alloc_maxlevels > 0); /* * For a btree shorter than the maximum height, the worst case is that * every level gets split and a new level is added, then while inserting * another entry to refill the AGFL, every level under the old root gets * split again. 
This is: * * (full height split reservation) + (AGFL refill split height) * = (current height + 1) + (current height - 1) * = (new height) + (new height - 2) * = 2 * new height - 2 * * For a btree of maximum height, the worst case is that every level * under the root gets split, then while inserting another entry to * refill the AGFL, every level under the root gets split again. This is * also: * * 2 * (current height - 1) * = 2 * (new height - 1) * = 2 * new height - 2 */ /* space needed by-bno freespace btree */ min_free = min(bno_level + 1, mp->m_alloc_maxlevels) * 2 - 2; /* space needed by-size freespace btree */ min_free += min(cnt_level + 1, mp->m_alloc_maxlevels) * 2 - 2; /* space needed reverse mapping used space btree */ if (xfs_has_rmapbt(mp)) min_free += min(rmap_level + 1, mp->m_rmap_maxlevels) * 2 - 2; return min_free; } /* * Check if the operation we are fixing up the freelist for should go ahead or * not. If we are freeing blocks, we always allow it, otherwise the allocation * is dependent on whether the size and shape of free space available will * permit the requested allocation to take place. */ static bool xfs_alloc_space_available( struct xfs_alloc_arg *args, xfs_extlen_t min_free, int flags) { struct xfs_perag *pag = args->pag; xfs_extlen_t alloc_len, longest; xfs_extlen_t reservation; /* blocks that are still reserved */ int available; xfs_extlen_t agflcount; if (flags & XFS_ALLOC_FLAG_FREEING) return true; reservation = xfs_ag_resv_needed(pag, args->resv); /* do we have enough contiguous free space for the allocation? */ alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop; longest = xfs_alloc_longest_free_extent(pag, min_free, reservation); if (longest < alloc_len) return false; /* * Do we have enough free space remaining for the allocation? Don't * account extra agfl blocks because we are about to defer free them, * making them unavailable until the current transaction commits. */ agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free); available = (int)(pag->pagf_freeblks + agflcount - reservation - min_free - args->minleft); if (available < (int)max(args->total, alloc_len)) return false; /* * Clamp maxlen to the amount of free space available for the actual * extent allocation. */ if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) { args->maxlen = available; ASSERT(args->maxlen > 0); ASSERT(args->maxlen >= args->minlen); } return true; } /* * Check the agfl fields of the agf for inconsistency or corruption. * * The original purpose was to detect an agfl header padding mismatch between * current and early v5 kernels. This problem manifests as a 1-slot size * difference between the on-disk flcount and the active [first, last] range of * a wrapped agfl. * * However, we need to use these same checks to catch agfl count corruptions * unrelated to padding. This could occur on any v4 or v5 filesystem, so either * way, we need to reset the agfl and warn the user. * * Return true if a reset is required before the agfl can be used, false * otherwise. */ static bool xfs_agfl_needs_reset( struct xfs_mount *mp, struct xfs_agf *agf) { uint32_t f = be32_to_cpu(agf->agf_flfirst); uint32_t l = be32_to_cpu(agf->agf_fllast); uint32_t c = be32_to_cpu(agf->agf_flcount); int agfl_size = xfs_agfl_size(mp); int active; /* * The agf read verifier catches severe corruption of these fields. * Repeat some sanity checks to cover a packed -> unpacked mismatch if * the verifier allows it. 
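 *
 * In userspace terms the whole consistency check below amounts to this
 * sketch (editor's illustration with plain host-endian fields):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool agfl_count_consistent(uint32_t first, uint32_t last,
 *			uint32_t count, uint32_t size)
 *	{
 *		uint32_t active;
 *
 *		if (first >= size || last >= size || count > size)
 *			return false;
 *		if (!count)
 *			active = 0;
 *		else if (last >= first)
 *			active = last - first + 1;		// no wrap
 *		else
 *			active = size - first + last + 1;	// wrapped list
 *		return active == count;
 *	}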
*/ if (f >= agfl_size || l >= agfl_size) return true; if (c > agfl_size) return true; /* * Check consistency between the on-disk count and the active range. An * agfl padding mismatch manifests as an inconsistent flcount. */ if (c && l >= f) active = l - f + 1; else if (c) active = agfl_size - f + l + 1; else active = 0; return active != c; } /* * Reset the agfl to an empty state. Ignore/drop any existing blocks since the * agfl content cannot be trusted. Warn the user that a repair is required to * recover leaked blocks. * * The purpose of this mechanism is to handle filesystems affected by the agfl * header padding mismatch problem. A reset keeps the filesystem online with a * relatively minor free space accounting inconsistency rather than suffer the * inevitable crash from use of an invalid agfl block. */ static void xfs_agfl_reset( struct xfs_trans *tp, struct xfs_buf *agbp, struct xfs_perag *pag) { struct xfs_mount *mp = tp->t_mountp; struct xfs_agf *agf = agbp->b_addr; ASSERT(xfs_perag_agfl_needs_reset(pag)); trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_); xfs_warn(mp, "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. " "Please unmount and run xfs_repair.", pag_agno(pag), pag->pagf_flcount); agf->agf_flfirst = 0; agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1); agf->agf_flcount = 0; xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT); pag->pagf_flcount = 0; clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate); } /* * Add the extent to the list of extents to be free at transaction end. * The list is maintained sorted (by block number). */ static int xfs_defer_extent_free( struct xfs_trans *tp, xfs_fsblock_t bno, xfs_filblks_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type, unsigned int free_flags, struct xfs_defer_pending **dfpp) { struct xfs_extent_free_item *xefi; struct xfs_mount *mp = tp->t_mountp; ASSERT(len <= XFS_MAX_BMBT_EXTLEN); ASSERT(!isnullstartblock(bno)); ASSERT(!(free_flags & ~XFS_FREE_EXTENT_ALL_FLAGS)); if (free_flags & XFS_FREE_EXTENT_REALTIME) { if (type != XFS_AG_RESV_NONE) { ASSERT(type == XFS_AG_RESV_NONE); return -EFSCORRUPTED; } if (XFS_IS_CORRUPT(mp, !xfs_verify_rtbext(mp, bno, len))) return -EFSCORRUPTED; } else { if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len))) return -EFSCORRUPTED; } xefi = kmem_cache_zalloc(xfs_extfree_item_cache, GFP_KERNEL | __GFP_NOFAIL); xefi->xefi_startblock = bno; xefi->xefi_blockcount = (xfs_extlen_t)len; xefi->xefi_agresv = type; if (free_flags & XFS_FREE_EXTENT_SKIP_DISCARD) xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD; if (free_flags & XFS_FREE_EXTENT_REALTIME) xefi->xefi_flags |= XFS_EFI_REALTIME; if (oinfo) { ASSERT(oinfo->oi_offset == 0); if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK) xefi->xefi_flags |= XFS_EFI_ATTR_FORK; if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK) xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK; xefi->xefi_owner = oinfo->oi_owner; } else { xefi->xefi_owner = XFS_RMAP_OWN_NULL; } xfs_extent_free_defer_add(tp, xefi, dfpp); return 0; } int xfs_free_extent_later( struct xfs_trans *tp, xfs_fsblock_t bno, xfs_filblks_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type, unsigned int free_flags) { struct xfs_defer_pending *dontcare = NULL; return xfs_defer_extent_free(tp, bno, len, oinfo, type, free_flags, &dontcare); } /* * Set up automatic freeing of unwritten space in the filesystem. * * This function attached a paused deferred extent free item to the * transaction. 
Pausing means that the EFI will be logged in the next * transaction commit, but the pending EFI will not be finished until the * pending item is unpaused. * * If the system goes down after the EFI has been persisted to the log but * before the pending item is unpaused, log recovery will find the EFI, fail to * find the EFD, and free the space. * * If the pending item is unpaused, the next transaction commit will log an EFD * without freeing the space. * * Caller must ensure that the tp, fsbno, len, oinfo, and resv flags of the * @args structure are set to the relevant values. */ int xfs_alloc_schedule_autoreap( const struct xfs_alloc_arg *args, unsigned int free_flags, struct xfs_alloc_autoreap *aarp) { int error; error = xfs_defer_extent_free(args->tp, args->fsbno, args->len, &args->oinfo, args->resv, free_flags, &aarp->dfp); if (error) return error; xfs_defer_item_pause(args->tp, aarp->dfp); return 0; } /* * Cancel automatic freeing of unwritten space in the filesystem. * * Earlier, we created a paused deferred extent free item and attached it to * this transaction so that we could automatically roll back a new space * allocation if the system went down. Now we want to cancel the paused work * item by marking the EFI stale so we don't actually free the space, unpausing * the pending item and logging an EFD. * * The caller generally should have already mapped the space into the ondisk * filesystem. If the reserved space was partially used, the caller must call * xfs_free_extent_later to create a new EFI to free the unused space. */ void xfs_alloc_cancel_autoreap( struct xfs_trans *tp, struct xfs_alloc_autoreap *aarp) { struct xfs_defer_pending *dfp = aarp->dfp; struct xfs_extent_free_item *xefi; if (!dfp) return; list_for_each_entry(xefi, &dfp->dfp_work, xefi_list) xefi->xefi_flags |= XFS_EFI_CANCELLED; xfs_defer_item_unpause(tp, dfp); } /* * Commit automatic freeing of unwritten space in the filesystem. * * This unpauses an earlier _schedule_autoreap and commits to freeing the * allocated space. Call this if none of the reserved space was used. */ void xfs_alloc_commit_autoreap( struct xfs_trans *tp, struct xfs_alloc_autoreap *aarp) { if (aarp->dfp) xfs_defer_item_unpause(tp, aarp->dfp); } /* * Check if an AGF has a free extent record whose length is equal to * args->minlen. */ STATIC int xfs_exact_minlen_extent_available( struct xfs_alloc_arg *args, struct xfs_buf *agbp, int *stat) { struct xfs_btree_cur *cnt_cur; xfs_agblock_t fbno; xfs_extlen_t flen; int error = 0; cnt_cur = xfs_cntbt_init_cursor(args->mp, args->tp, agbp, args->pag); error = xfs_alloc_lookup_ge(cnt_cur, 0, args->minlen, stat); if (error) goto out; if (*stat == 0) { xfs_btree_mark_sick(cnt_cur); error = -EFSCORRUPTED; goto out; } error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, stat); if (error) goto out; if (*stat == 1 && flen != args->minlen) *stat = 0; out: xfs_btree_del_cursor(cnt_cur, error); return error; } /* * Decide whether to use this allocation group for this allocation. * If so, fix up the btree freelist's size. 
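 *
 * The go/no-go decision made through xfs_alloc_space_available() boils down
 * to the following editor's sketch, where alloc_len stands for
 * minlen + alignment - 1 + minalignslop, longest for the reservation-adjusted
 * longest free extent, and agflcount for min(pagf_flcount, min_free):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool ag_space_available(uint32_t longest, uint32_t freeblks,
 *			uint32_t agflcount, uint32_t reservation,
 *			uint32_t min_free, uint32_t minleft,
 *			uint32_t alloc_len, uint32_t total)
 *	{
 *		int64_t avail;
 *
 *		if (longest < alloc_len)
 *			return false;	// no single extent can hold the request
 *		avail = (int64_t)freeblks + agflcount - reservation -
 *			min_free - minleft;
 *		return avail >= (int64_t)(total > alloc_len ? total : alloc_len);
 *	}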
*/ int /* error */ xfs_alloc_fix_freelist( struct xfs_alloc_arg *args, /* allocation argument structure */ uint32_t alloc_flags) { struct xfs_mount *mp = args->mp; struct xfs_perag *pag = args->pag; struct xfs_trans *tp = args->tp; struct xfs_buf *agbp = NULL; struct xfs_buf *agflbp = NULL; struct xfs_alloc_arg targs; /* local allocation arguments */ xfs_agblock_t bno; /* freelist block */ xfs_extlen_t need; /* total blocks needed in freelist */ int error = 0; /* deferred ops (AGFL block frees) require permanent transactions */ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); if (!xfs_perag_initialised_agf(pag)) { error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp); if (error) { /* Couldn't lock the AGF so skip this AG. */ if (error == -EAGAIN) error = 0; goto out_no_agbp; } } /* * If this is a metadata preferred pag and we are user data then try * somewhere else if we are not being asked to try harder at this * point */ if (xfs_perag_prefers_metadata(pag) && (args->datatype & XFS_ALLOC_USERDATA) && (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)) { ASSERT(!(alloc_flags & XFS_ALLOC_FLAG_FREEING)); goto out_agbp_relse; } need = xfs_alloc_min_freelist(mp, pag); if (!xfs_alloc_space_available(args, need, alloc_flags | XFS_ALLOC_FLAG_CHECK)) goto out_agbp_relse; /* * Get the a.g. freespace buffer. * Can fail if we're not blocking on locks, and it's held. */ if (!agbp) { error = xfs_alloc_read_agf(pag, tp, alloc_flags, &agbp); if (error) { /* Couldn't lock the AGF so skip this AG. */ if (error == -EAGAIN) error = 0; goto out_no_agbp; } } /* reset a padding mismatched agfl before final free space check */ if (xfs_perag_agfl_needs_reset(pag)) xfs_agfl_reset(tp, agbp, pag); /* If there isn't enough total space or single-extent, reject it. */ need = xfs_alloc_min_freelist(mp, pag); if (!xfs_alloc_space_available(args, need, alloc_flags)) goto out_agbp_relse; if (IS_ENABLED(CONFIG_XFS_DEBUG) && args->alloc_minlen_only) { int stat; error = xfs_exact_minlen_extent_available(args, agbp, &stat); if (error || !stat) goto out_agbp_relse; } /* * Make the freelist shorter if it's too long. * * Note that from this point onwards, we will always release the agf and * agfl buffers on error. This handles the case where we error out and * the buffers are clean or may not have been joined to the transaction * and hence need to be released manually. If they have been joined to * the transaction, then xfs_trans_brelse() will handle them * appropriately based on the recursion count and dirty state of the * buffer. * * XXX (dgc): When we have lots of free space, does this buy us * anything other than extra overhead when we need to put more blocks * back on the free list? Maybe we should only do this when space is * getting low or the AGFL is more than half full? * * The NOSHRINK flag prevents the AGFL from being shrunk if it's too * big; the NORMAP flag prevents AGFL expand/shrink operations from * updating the rmapbt. Both flags are used in xfs_repair while we're * rebuilding the rmapbt, and neither are used by the kernel. They're * both required to ensure that rmaps are correctly recorded for the * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and * repair/rmap.c in xfsprogs for details. 
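 *
 * Structurally, the two freelist loops below reduce to this toy model
 * (editor's sketch; the real code defers the frees and can refill several
 * blocks per allocation):
 *
 *	struct agfl_model {
 *		unsigned int flcount;	// blocks currently on the freelist
 *		unsigned int freed;	// blocks handed back via deferred frees
 *		unsigned int filled;	// blocks pulled in from the AG btrees
 *	};
 *
 *	static void rebalance(struct agfl_model *m, unsigned int need)
 *	{
 *		while (m->flcount > need) {	// too long: shrink it
 *			m->flcount--;
 *			m->freed++;
 *		}
 *		while (m->flcount < need) {	// too short: refill it
 *			m->flcount++;
 *			m->filled++;
 *		}
 *	}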
*/ memset(&targs, 0, sizeof(targs)); /* struct copy below */ if (alloc_flags & XFS_ALLOC_FLAG_NORMAP) targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE; else targs.oinfo = XFS_RMAP_OINFO_AG; while (!(alloc_flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) { error = xfs_alloc_get_freelist(pag, tp, agbp, &bno, 0); if (error) goto out_agbp_relse; /* * Defer the AGFL block free. * * This helps to prevent log reservation overruns due to too * many allocation operations in a transaction. AGFL frees are * prone to this problem because for one they are always freed * one at a time. Further, an immediate AGFL block free can * cause a btree join and require another block free before the * real allocation can proceed. * Deferring the free disconnects freeing up the AGFL slot from * freeing the block. */ error = xfs_free_extent_later(tp, xfs_agbno_to_fsb(pag, bno), 1, &targs.oinfo, XFS_AG_RESV_AGFL, 0); if (error) goto out_agbp_relse; } targs.tp = tp; targs.mp = mp; targs.agbp = agbp; targs.agno = args->agno; targs.alignment = targs.minlen = targs.prod = 1; targs.pag = pag; error = xfs_alloc_read_agfl(pag, tp, &agflbp); if (error) goto out_agbp_relse; /* Make the freelist longer if it's too short. */ while (pag->pagf_flcount < need) { targs.agbno = 0; targs.maxlen = need - pag->pagf_flcount; targs.resv = XFS_AG_RESV_AGFL; /* Allocate as many blocks as possible at once. */ error = xfs_alloc_ag_vextent_size(&targs, alloc_flags); if (error) goto out_agflbp_relse; /* * Stop if we run out. Won't happen if callers are obeying * the restrictions correctly. Can happen for free calls * on a completely full ag. */ if (targs.agbno == NULLAGBLOCK) { if (alloc_flags & XFS_ALLOC_FLAG_FREEING) break; goto out_agflbp_relse; } if (!xfs_rmap_should_skip_owner_update(&targs.oinfo)) { error = xfs_rmap_alloc(tp, agbp, pag, targs.agbno, targs.len, &targs.oinfo); if (error) goto out_agflbp_relse; } error = xfs_alloc_update_counters(tp, agbp, -((long)(targs.len))); if (error) goto out_agflbp_relse; /* * Put each allocated block on the list. */ for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) { error = xfs_alloc_put_freelist(pag, tp, agbp, agflbp, bno, 0); if (error) goto out_agflbp_relse; } } xfs_trans_brelse(tp, agflbp); args->agbp = agbp; return 0; out_agflbp_relse: xfs_trans_brelse(tp, agflbp); out_agbp_relse: if (agbp) xfs_trans_brelse(tp, agbp); out_no_agbp: args->agbp = NULL; return error; } /* * Get a block from the freelist. * Returns with the buffer for the block gotten. */ int xfs_alloc_get_freelist( struct xfs_perag *pag, struct xfs_trans *tp, struct xfs_buf *agbp, xfs_agblock_t *bnop, int btreeblk) { struct xfs_agf *agf = agbp->b_addr; struct xfs_buf *agflbp; xfs_agblock_t bno; __be32 *agfl_bno; int error; uint32_t logflags; struct xfs_mount *mp = tp->t_mountp; /* * Freelist is empty, give up. */ if (!agf->agf_flcount) { *bnop = NULLAGBLOCK; return 0; } /* * Read the array of free blocks. */ error = xfs_alloc_read_agfl(pag, tp, &agflbp); if (error) return error; /* * Get the block number and update the data structures. 
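 *
 * The circular-array bookkeeping below is, in essence (editor's sketch with
 * host-endian fields; on disk agf_flfirst/agf_flcount are big-endian):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t agfl_get(const uint32_t *agfl, uint32_t agfl_size,
 *			uint32_t *flfirst, uint32_t *flcount)
 *	{
 *		uint32_t bno = agfl[*flfirst];
 *
 *		*flfirst = (*flfirst + 1) % agfl_size;	// wrap around the array
 *		(*flcount)--;
 *		return bno;
 *	}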
*/ agfl_bno = xfs_buf_to_agfl_bno(agflbp); bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]); if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno))) return -EFSCORRUPTED; be32_add_cpu(&agf->agf_flfirst, 1); xfs_trans_brelse(tp, agflbp); if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) agf->agf_flfirst = 0; ASSERT(!xfs_perag_agfl_needs_reset(pag)); be32_add_cpu(&agf->agf_flcount, -1); pag->pagf_flcount--; logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT; if (btreeblk) { be32_add_cpu(&agf->agf_btreeblks, 1); pag->pagf_btreeblks++; logflags |= XFS_AGF_BTREEBLKS; } xfs_alloc_log_agf(tp, agbp, logflags); *bnop = bno; return 0; } /* * Log the given fields from the agf structure. */ void xfs_alloc_log_agf( struct xfs_trans *tp, struct xfs_buf *bp, uint32_t fields) { int first; /* first byte offset */ int last; /* last byte offset */ static const short offsets[] = { offsetof(xfs_agf_t, agf_magicnum), offsetof(xfs_agf_t, agf_versionnum), offsetof(xfs_agf_t, agf_seqno), offsetof(xfs_agf_t, agf_length), offsetof(xfs_agf_t, agf_bno_root), /* also cnt/rmap root */ offsetof(xfs_agf_t, agf_bno_level), /* also cnt/rmap levels */ offsetof(xfs_agf_t, agf_flfirst), offsetof(xfs_agf_t, agf_fllast), offsetof(xfs_agf_t, agf_flcount), offsetof(xfs_agf_t, agf_freeblks), offsetof(xfs_agf_t, agf_longest), offsetof(xfs_agf_t, agf_btreeblks), offsetof(xfs_agf_t, agf_uuid), offsetof(xfs_agf_t, agf_rmap_blocks), offsetof(xfs_agf_t, agf_refcount_blocks), offsetof(xfs_agf_t, agf_refcount_root), offsetof(xfs_agf_t, agf_refcount_level), /* needed so that we don't log the whole rest of the structure: */ offsetof(xfs_agf_t, agf_spare64), sizeof(xfs_agf_t) }; trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_); xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF); xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last); xfs_trans_log_buf(tp, bp, (uint)first, (uint)last); } /* * Put the block on the freelist for the allocation group. */ int xfs_alloc_put_freelist( struct xfs_perag *pag, struct xfs_trans *tp, struct xfs_buf *agbp, struct xfs_buf *agflbp, xfs_agblock_t bno, int btreeblk) { struct xfs_mount *mp = tp->t_mountp; struct xfs_agf *agf = agbp->b_addr; __be32 *blockp; int error; uint32_t logflags; __be32 *agfl_bno; int startoff; if (!agflbp) { error = xfs_alloc_read_agfl(pag, tp, &agflbp); if (error) return error; } be32_add_cpu(&agf->agf_fllast, 1); if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp)) agf->agf_fllast = 0; ASSERT(!xfs_perag_agfl_needs_reset(pag)); be32_add_cpu(&agf->agf_flcount, 1); pag->pagf_flcount++; logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT; if (btreeblk) { be32_add_cpu(&agf->agf_btreeblks, -1); pag->pagf_btreeblks--; logflags |= XFS_AGF_BTREEBLKS; } ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)); agfl_bno = xfs_buf_to_agfl_bno(agflbp); blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)]; *blockp = cpu_to_be32(bno); startoff = (char *)blockp - (char *)agflbp->b_addr; xfs_alloc_log_agf(tp, agbp, logflags); xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF); xfs_trans_log_buf(tp, agflbp, startoff, startoff + sizeof(xfs_agblock_t) - 1); return 0; } /* * Check that this AGF/AGI header's sequence number and length matches the AG * number and size in fsblocks. */ xfs_failaddr_t xfs_validate_ag_length( struct xfs_buf *bp, uint32_t seqno, uint32_t length) { struct xfs_mount *mp = bp->b_mount; /* * During growfs operations, the perag is not fully initialised, * so we can't use it for any useful checking. 
growfs ensures we can't * use it by using uncached buffers that don't have the perag attached * so we can detect and avoid this problem. */ if (bp->b_pag && seqno != pag_agno(bp->b_pag)) return __this_address; /* * Only the last AG in the filesystem is allowed to be shorter * than the AG size recorded in the superblock. */ if (length != mp->m_sb.sb_agblocks) { /* * During growfs, the new last AG can get here before we * have updated the superblock. Give it a pass on the seqno * check. */ if (bp->b_pag && seqno != mp->m_sb.sb_agcount - 1) return __this_address; if (length < XFS_MIN_AG_BLOCKS) return __this_address; if (length > mp->m_sb.sb_agblocks) return __this_address; } return NULL; } /* * Verify the AGF is consistent. * * We do not verify the AGFL indexes in the AGF are fully consistent here * because of issues with variable on-disk structure sizes. Instead, we check * the agfl indexes for consistency when we initialise the perag from the AGF * information after a read completes. * * If the index is inconsistent, then we mark the perag as needing an AGFL * reset. The first AGFL update performed then resets the AGFL indexes and * refills the AGFL with known good free blocks, allowing the filesystem to * continue operating normally at the cost of a few leaked free space blocks. */ static xfs_failaddr_t xfs_agf_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; struct xfs_agf *agf = bp->b_addr; xfs_failaddr_t fa; uint32_t agf_seqno = be32_to_cpu(agf->agf_seqno); uint32_t agf_length = be32_to_cpu(agf->agf_length); if (xfs_has_crc(mp)) { if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid)) return __this_address; if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn))) return __this_address; } if (!xfs_verify_magic(bp, agf->agf_magicnum)) return __this_address; if (!XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum))) return __this_address; /* * Both agf_seqno and agf_length need to be validated before anything else * block number related in the AGF or AGFL can be checked.
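 *
 * The length rule enforced by xfs_validate_ag_length() above is, in essence
 * (editor's sketch; the growfs seqno cross-check is omitted and
 * min_ag_blocks stands in for XFS_MIN_AG_BLOCKS):
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool ag_length_plausible(uint32_t length, uint32_t agblocks,
 *			uint32_t min_ag_blocks)
 *	{
 *		if (length == agblocks)
 *			return true;	// every AG but the last matches sb_agblocks
 *		// only the last AG may be shorter, and never undersized
 *		return length >= min_ag_blocks && length < agblocks;
 *	}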
*/ fa = xfs_validate_ag_length(bp, agf_seqno, agf_length); if (fa) return fa; if (be32_to_cpu(agf->agf_flfirst) >= xfs_agfl_size(mp)) return __this_address; if (be32_to_cpu(agf->agf_fllast) >= xfs_agfl_size(mp)) return __this_address; if (be32_to_cpu(agf->agf_flcount) > xfs_agfl_size(mp)) return __this_address; if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) || be32_to_cpu(agf->agf_freeblks) > agf_length) return __this_address; if (be32_to_cpu(agf->agf_bno_level) < 1 || be32_to_cpu(agf->agf_cnt_level) < 1 || be32_to_cpu(agf->agf_bno_level) > mp->m_alloc_maxlevels || be32_to_cpu(agf->agf_cnt_level) > mp->m_alloc_maxlevels) return __this_address; if (xfs_has_lazysbcount(mp) && be32_to_cpu(agf->agf_btreeblks) > agf_length) return __this_address; if (xfs_has_rmapbt(mp)) { if (be32_to_cpu(agf->agf_rmap_blocks) > agf_length) return __this_address; if (be32_to_cpu(agf->agf_rmap_level) < 1 || be32_to_cpu(agf->agf_rmap_level) > mp->m_rmap_maxlevels) return __this_address; } if (xfs_has_reflink(mp)) { if (be32_to_cpu(agf->agf_refcount_blocks) > agf_length) return __this_address; if (be32_to_cpu(agf->agf_refcount_level) < 1 || be32_to_cpu(agf->agf_refcount_level) > mp->m_refc_maxlevels) return __this_address; } return NULL; } static void xfs_agf_read_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; xfs_failaddr_t fa; if (xfs_has_crc(mp) && !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) xfs_verifier_error(bp, -EFSBADCRC, __this_address); else { fa = xfs_agf_verify(bp); if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF)) xfs_verifier_error(bp, -EFSCORRUPTED, fa); } } static void xfs_agf_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; struct xfs_buf_log_item *bip = bp->b_log_item; struct xfs_agf *agf = bp->b_addr; xfs_failaddr_t fa; fa = xfs_agf_verify(bp); if (fa) { xfs_verifier_error(bp, -EFSCORRUPTED, fa); return; } if (!xfs_has_crc(mp)) return; if (bip) agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF); } const struct xfs_buf_ops xfs_agf_buf_ops = { .name = "xfs_agf", .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) }, .verify_read = xfs_agf_read_verify, .verify_write = xfs_agf_write_verify, .verify_struct = xfs_agf_verify, }; /* * Read in the allocation group header (free/alloc section). */ int xfs_read_agf( struct xfs_perag *pag, struct xfs_trans *tp, int flags, struct xfs_buf **agfbpp) { struct xfs_mount *mp = pag_mount(pag); int error; trace_xfs_read_agf(pag); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, pag_agno(pag), XFS_AGF_DADDR(mp)), XFS_FSS_TO_BB(mp, 1), flags, agfbpp, &xfs_agf_buf_ops); if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_AGF); if (error) return error; xfs_buf_set_ref(*agfbpp, XFS_AGF_REF); return 0; } /* * Read in the allocation group header (free/alloc section) and initialise the * perag structure if necessary. If the caller provides @agfbpp, then return the * locked buffer to the caller, otherwise free it. */ int xfs_alloc_read_agf( struct xfs_perag *pag, struct xfs_trans *tp, int flags, struct xfs_buf **agfbpp) { struct xfs_mount *mp = pag_mount(pag); struct xfs_buf *agfbp; struct xfs_agf *agf; int error; int allocbt_blks; trace_xfs_alloc_read_agf(pag); /* We don't support trylock when freeing. */ ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) != (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)); error = xfs_read_agf(pag, tp, (flags & XFS_ALLOC_FLAG_TRYLOCK) ? 
XBF_TRYLOCK : 0, &agfbp); if (error) return error; agf = agfbp->b_addr; if (!xfs_perag_initialised_agf(pag)) { pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); pag->pagf_flcount = be32_to_cpu(agf->agf_flcount); pag->pagf_longest = be32_to_cpu(agf->agf_longest); pag->pagf_bno_level = be32_to_cpu(agf->agf_bno_level); pag->pagf_cnt_level = be32_to_cpu(agf->agf_cnt_level); pag->pagf_rmap_level = be32_to_cpu(agf->agf_rmap_level); pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level); if (xfs_agfl_needs_reset(mp, agf)) set_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate); else clear_bit(XFS_AGSTATE_AGFL_NEEDS_RESET, &pag->pag_opstate); /* * Update the in-core allocbt counter. Filter out the rmapbt * subset of the btreeblks counter because the rmapbt is managed * by perag reservation. Subtract one for the rmapbt root block * because the rmap counter includes it while the btreeblks * counter only tracks non-root blocks. */ allocbt_blks = pag->pagf_btreeblks; if (xfs_has_rmapbt(mp)) allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1; if (allocbt_blks > 0) atomic64_add(allocbt_blks, &mp->m_allocbt_blks); set_bit(XFS_AGSTATE_AGF_INIT, &pag->pag_opstate); } #ifdef DEBUG else if (!xfs_is_shutdown(mp)) { ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks)); ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); ASSERT(pag->pagf_bno_level == be32_to_cpu(agf->agf_bno_level)); ASSERT(pag->pagf_cnt_level == be32_to_cpu(agf->agf_cnt_level)); } #endif if (agfbpp) *agfbpp = agfbp; else xfs_trans_brelse(tp, agfbp); return 0; } /* * Pre-proces allocation arguments to set initial state that we don't require * callers to set up correctly, as well as bounds check the allocation args * that are set up. */ static int xfs_alloc_vextent_check_args( struct xfs_alloc_arg *args, xfs_fsblock_t target, xfs_agnumber_t *minimum_agno) { struct xfs_mount *mp = args->mp; xfs_agblock_t agsize; args->fsbno = NULLFSBLOCK; *minimum_agno = 0; if (args->tp->t_highest_agno != NULLAGNUMBER) *minimum_agno = args->tp->t_highest_agno; /* * Just fix this up, for the case where the last a.g. is shorter * (or there's only one a.g.) and the caller couldn't easily figure * that out (xfs_bmap_alloc). */ agsize = mp->m_sb.sb_agblocks; if (args->maxlen > agsize) args->maxlen = agsize; if (args->alignment == 0) args->alignment = 1; ASSERT(args->minlen > 0); ASSERT(args->maxlen > 0); ASSERT(args->alignment > 0); ASSERT(args->resv != XFS_AG_RESV_AGFL); ASSERT(XFS_FSB_TO_AGNO(mp, target) < mp->m_sb.sb_agcount); ASSERT(XFS_FSB_TO_AGBNO(mp, target) < agsize); ASSERT(args->minlen <= args->maxlen); ASSERT(args->minlen <= agsize); ASSERT(args->mod < args->prod); if (XFS_FSB_TO_AGNO(mp, target) >= mp->m_sb.sb_agcount || XFS_FSB_TO_AGBNO(mp, target) >= agsize || args->minlen > args->maxlen || args->minlen > agsize || args->mod >= args->prod) { trace_xfs_alloc_vextent_badargs(args); return -ENOSPC; } if (args->agno != NULLAGNUMBER && *minimum_agno > args->agno) { trace_xfs_alloc_vextent_skip_deadlock(args); return -ENOSPC; } return 0; } /* * Prepare an AG for allocation. If the AG is not prepared to accept the * allocation, return failure. * * XXX(dgc): The complexity of "need_pag" will go away as all caller paths are * modified to hold their own perag references. 
*/ static int xfs_alloc_vextent_prepare_ag( struct xfs_alloc_arg *args, uint32_t alloc_flags) { bool need_pag = !args->pag; int error; if (need_pag) args->pag = xfs_perag_get(args->mp, args->agno); args->agbp = NULL; error = xfs_alloc_fix_freelist(args, alloc_flags); if (error) { trace_xfs_alloc_vextent_nofix(args); if (need_pag) xfs_perag_put(args->pag); args->agbno = NULLAGBLOCK; return error; } if (!args->agbp) { /* cannot allocate in this AG at all */ trace_xfs_alloc_vextent_noagbp(args); args->agbno = NULLAGBLOCK; return 0; } args->wasfromfl = 0; return 0; } /* * Post-process allocation results to account for the allocation if it succeed * and set the allocated block number correctly for the caller. * * XXX: we should really be returning ENOSPC for ENOSPC, not * hiding it behind a "successful" NULLFSBLOCK allocation. */ static int xfs_alloc_vextent_finish( struct xfs_alloc_arg *args, xfs_agnumber_t minimum_agno, int alloc_error, bool drop_perag) { struct xfs_mount *mp = args->mp; int error = 0; /* * We can end up here with a locked AGF. If we failed, the caller is * likely going to try to allocate again with different parameters, and * that can widen the AGs that are searched for free space. If we have * to do BMBT block allocation, we have to do a new allocation. * * Hence leaving this function with the AGF locked opens up potential * ABBA AGF deadlocks because a future allocation attempt in this * transaction may attempt to lock a lower number AGF. * * We can't release the AGF until the transaction is commited, so at * this point we must update the "first allocation" tracker to point at * this AG if the tracker is empty or points to a lower AG. This allows * the next allocation attempt to be modified appropriately to avoid * deadlocks. */ if (args->agbp && (args->tp->t_highest_agno == NULLAGNUMBER || args->agno > minimum_agno)) args->tp->t_highest_agno = args->agno; /* * If the allocation failed with an error or we had an ENOSPC result, * preserve the returned error whilst also marking the allocation result * as "no extent allocated". This ensures that callers that fail to * capture the error will still treat it as a failed allocation. */ if (alloc_error || args->agbno == NULLAGBLOCK) { args->fsbno = NULLFSBLOCK; error = alloc_error; goto out_drop_perag; } args->fsbno = xfs_agbno_to_fsb(args->pag, args->agbno); ASSERT(args->len >= args->minlen); ASSERT(args->len <= args->maxlen); ASSERT(args->agbno % args->alignment == 0); XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno), args->len); /* if not file data, insert new block into the reverse map btree */ if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) { error = xfs_rmap_alloc(args->tp, args->agbp, args->pag, args->agbno, args->len, &args->oinfo); if (error) goto out_drop_perag; } if (!args->wasfromfl) { error = xfs_alloc_update_counters(args->tp, args->agbp, -((long)(args->len))); if (error) goto out_drop_perag; ASSERT(!xfs_extent_busy_search(pag_group(args->pag), args->agbno, args->len)); } xfs_ag_resv_alloc_extent(args->pag, args->resv, args); XFS_STATS_INC(mp, xs_allocx); XFS_STATS_ADD(mp, xs_allocb, args->len); trace_xfs_alloc_vextent_finish(args); out_drop_perag: if (drop_perag && args->pag) { xfs_perag_rele(args->pag); args->pag = NULL; } return error; } /* * Allocate within a single AG only. This uses a best-fit length algorithm so if * you need an exact sized allocation without locality constraints, this is the * fastest way to do it. * * Caller is expected to hold a perag reference in args->pag. 
 */
int
xfs_alloc_vextent_this_ag(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		agno)
{
	xfs_agnumber_t		minimum_agno;
	uint32_t		alloc_flags = 0;
	int			error;

	ASSERT(args->pag != NULL);
	ASSERT(pag_agno(args->pag) == agno);

	args->agno = agno;
	args->agbno = 0;

	trace_xfs_alloc_vextent_this_ag(args);

	error = xfs_alloc_vextent_check_args(args,
			xfs_agbno_to_fsb(args->pag, 0), &minimum_agno);
	if (error) {
		if (error == -ENOSPC)
			return 0;
		return error;
	}

	error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
	if (!error && args->agbp)
		error = xfs_alloc_ag_vextent_size(args, alloc_flags);

	return xfs_alloc_vextent_finish(args, minimum_agno, error, false);
}

/*
 * Iterate all AGs trying to allocate an extent starting from @start_ag.
 *
 * If the incoming allocation type is XFS_ALLOCTYPE_NEAR_BNO, it means the
 * allocation attempts in @start_agno have locality information. If we fail to
 * allocate in that AG, then we revert to anywhere-in-AG for all the other AGs
 * we attempt to allocate in as there is no locality optimisation possible for
 * those allocations.
 *
 * On return, args->pag may be left referenced if we finish before the "all
 * failed" return point. The allocation finish still needs the perag, and
 * so the caller will release it once they've finished the allocation.
 *
 * When we wrap the AG iteration at the end of the filesystem, we have to be
 * careful not to wrap into AGs below ones we already have locked in the
 * transaction if we are doing a blocking iteration. This will result in an
 * out-of-order locking of AGFs and hence can cause deadlocks.
 */
static int
xfs_alloc_vextent_iterate_ags(
	struct xfs_alloc_arg	*args,
	xfs_agnumber_t		minimum_agno,
	xfs_agnumber_t		start_agno,
	xfs_agblock_t		target_agbno,
	uint32_t		alloc_flags)
{
	struct xfs_mount	*mp = args->mp;
	xfs_agnumber_t		restart_agno = minimum_agno;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK)
		restart_agno = 0;
restart:
	for_each_perag_wrap_range(mp, start_agno, restart_agno,
			mp->m_sb.sb_agcount, agno, args->pag) {
		args->agno = agno;
		error = xfs_alloc_vextent_prepare_ag(args, alloc_flags);
		if (error)
			break;
		if (!args->agbp) {
			trace_xfs_alloc_vextent_loopfailed(args);
			continue;
		}

		/*
		 * Allocation is supposed to succeed now, so break out of the
		 * loop regardless of whether we succeed or not.
		 */
		if (args->agno == start_agno && target_agbno) {
			args->agbno = target_agbno;
			error = xfs_alloc_ag_vextent_near(args, alloc_flags);
		} else {
			args->agbno = 0;
			error = xfs_alloc_ag_vextent_size(args, alloc_flags);
		}
		break;
	}
	if (error) {
		xfs_perag_rele(args->pag);
		args->pag = NULL;
		return error;
	}
	if (args->agbp)
		return 0;

	/*
	 * We didn't find an AG we can allocate from. If we were given
	 * constraining flags by the caller, drop them and retry the
	 * allocation without any constraints being set.
	 */
	if (alloc_flags & XFS_ALLOC_FLAG_TRYLOCK) {
		alloc_flags &= ~XFS_ALLOC_FLAG_TRYLOCK;
		restart_agno = minimum_agno;
		goto restart;
	}

	ASSERT(args->pag == NULL);
	trace_xfs_alloc_vextent_allfailed(args);
	return 0;
}

/*
 * Iterate the AGs from the start AG to the end of the filesystem, trying to
 * allocate blocks. It starts with a near allocation attempt in the initial
 * AG, then falls back to anywhere-in-AG after the first AG fails. It will
 * wrap back to zero if allowed by previous allocations in this transaction,
 * otherwise will wrap back to the start AG and run a second blocking pass to
 * the end of the filesystem.
*/ int xfs_alloc_vextent_start_ag( struct xfs_alloc_arg *args, xfs_fsblock_t target) { struct xfs_mount *mp = args->mp; xfs_agnumber_t minimum_agno; xfs_agnumber_t start_agno; xfs_agnumber_t rotorstep = xfs_rotorstep; bool bump_rotor = false; uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK; int error; ASSERT(args->pag == NULL); args->agno = NULLAGNUMBER; args->agbno = NULLAGBLOCK; trace_xfs_alloc_vextent_start_ag(args); error = xfs_alloc_vextent_check_args(args, target, &minimum_agno); if (error) { if (error == -ENOSPC) return 0; return error; } if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) && xfs_is_inode32(mp)) { target = XFS_AGB_TO_FSB(mp, ((mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount), 0); bump_rotor = 1; } start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target)); error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno, XFS_FSB_TO_AGBNO(mp, target), alloc_flags); if (bump_rotor) { if (args->agno == start_agno) mp->m_agfrotor = (mp->m_agfrotor + 1) % (mp->m_sb.sb_agcount * rotorstep); else mp->m_agfrotor = (args->agno * rotorstep + 1) % (mp->m_sb.sb_agcount * rotorstep); } return xfs_alloc_vextent_finish(args, minimum_agno, error, true); } /* * Iterate from the agno indicated via @target through to the end of the * filesystem attempting blocking allocation. This does not wrap or try a second * pass, so will not recurse into AGs lower than indicated by the target. */ int xfs_alloc_vextent_first_ag( struct xfs_alloc_arg *args, xfs_fsblock_t target) { struct xfs_mount *mp = args->mp; xfs_agnumber_t minimum_agno; xfs_agnumber_t start_agno; uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK; int error; ASSERT(args->pag == NULL); args->agno = NULLAGNUMBER; args->agbno = NULLAGBLOCK; trace_xfs_alloc_vextent_first_ag(args); error = xfs_alloc_vextent_check_args(args, target, &minimum_agno); if (error) { if (error == -ENOSPC) return 0; return error; } start_agno = max(minimum_agno, XFS_FSB_TO_AGNO(mp, target)); error = xfs_alloc_vextent_iterate_ags(args, minimum_agno, start_agno, XFS_FSB_TO_AGBNO(mp, target), alloc_flags); return xfs_alloc_vextent_finish(args, minimum_agno, error, true); } /* * Allocate at the exact block target or fail. Caller is expected to hold a * perag reference in args->pag. */ int xfs_alloc_vextent_exact_bno( struct xfs_alloc_arg *args, xfs_fsblock_t target) { struct xfs_mount *mp = args->mp; xfs_agnumber_t minimum_agno; int error; ASSERT(args->pag != NULL); ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target)); args->agno = XFS_FSB_TO_AGNO(mp, target); args->agbno = XFS_FSB_TO_AGBNO(mp, target); trace_xfs_alloc_vextent_exact_bno(args); error = xfs_alloc_vextent_check_args(args, target, &minimum_agno); if (error) { if (error == -ENOSPC) return 0; return error; } error = xfs_alloc_vextent_prepare_ag(args, 0); if (!error && args->agbp) error = xfs_alloc_ag_vextent_exact(args); return xfs_alloc_vextent_finish(args, minimum_agno, error, false); } /* * Allocate an extent as close to the target as possible. If there are not * viable candidates in the AG, then fail the allocation. * * Caller may or may not have a per-ag reference in args->pag. 
*/ int xfs_alloc_vextent_near_bno( struct xfs_alloc_arg *args, xfs_fsblock_t target) { struct xfs_mount *mp = args->mp; xfs_agnumber_t minimum_agno; bool needs_perag = args->pag == NULL; uint32_t alloc_flags = 0; int error; if (!needs_perag) ASSERT(pag_agno(args->pag) == XFS_FSB_TO_AGNO(mp, target)); args->agno = XFS_FSB_TO_AGNO(mp, target); args->agbno = XFS_FSB_TO_AGBNO(mp, target); trace_xfs_alloc_vextent_near_bno(args); error = xfs_alloc_vextent_check_args(args, target, &minimum_agno); if (error) { if (error == -ENOSPC) return 0; return error; } if (needs_perag) args->pag = xfs_perag_grab(mp, args->agno); error = xfs_alloc_vextent_prepare_ag(args, alloc_flags); if (!error && args->agbp) error = xfs_alloc_ag_vextent_near(args, alloc_flags); return xfs_alloc_vextent_finish(args, minimum_agno, error, needs_perag); } /* Ensure that the freelist is at full capacity. */ int xfs_free_extent_fix_freelist( struct xfs_trans *tp, struct xfs_perag *pag, struct xfs_buf **agbp) { struct xfs_alloc_arg args; int error; memset(&args, 0, sizeof(struct xfs_alloc_arg)); args.tp = tp; args.mp = tp->t_mountp; args.agno = pag_agno(pag); args.pag = pag; /* * validate that the block number is legal - the enables us to detect * and handle a silent filesystem corruption rather than crashing. */ if (args.agno >= args.mp->m_sb.sb_agcount) return -EFSCORRUPTED; error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING); if (error) return error; *agbp = args.agbp; return 0; } /* * Free an extent. * Just break up the extent address and hand off to xfs_free_ag_extent * after fixing up the freelist. */ int __xfs_free_extent( struct xfs_trans *tp, struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type, bool skip_discard) { struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *agbp; struct xfs_agf *agf; int error; unsigned int busy_flags = 0; ASSERT(len != 0); ASSERT(type != XFS_AG_RESV_AGFL); if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FREE_EXTENT)) return -EIO; error = xfs_free_extent_fix_freelist(tp, pag, &agbp); if (error) { if (xfs_metadata_is_sick(error)) xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT); return error; } agf = agbp->b_addr; if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) { xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT); error = -EFSCORRUPTED; goto err_release; } /* validate the extent size is legal now we have the agf locked */ if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) { xfs_ag_mark_sick(pag, XFS_SICK_AG_BNOBT); error = -EFSCORRUPTED; goto err_release; } error = xfs_free_ag_extent(tp, agbp, agbno, len, oinfo, type); if (error) goto err_release; if (skip_discard) busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD; xfs_extent_busy_insert(tp, pag_group(pag), agbno, len, busy_flags); return 0; err_release: xfs_trans_brelse(tp, agbp); return error; } struct xfs_alloc_query_range_info { xfs_alloc_query_range_fn fn; void *priv; }; /* Format btree record and pass to our callback. */ STATIC int xfs_alloc_query_range_helper( struct xfs_btree_cur *cur, const union xfs_btree_rec *rec, void *priv) { struct xfs_alloc_query_range_info *query = priv; struct xfs_alloc_rec_incore irec; xfs_failaddr_t fa; xfs_alloc_btrec_to_irec(rec, &irec); fa = xfs_alloc_check_irec(to_perag(cur->bc_group), &irec); if (fa) return xfs_alloc_complain_bad_rec(cur, fa, &irec); return query->fn(cur, &irec, query->priv); } /* Find all free space within a given range of blocks. 
*/ int xfs_alloc_query_range( struct xfs_btree_cur *cur, const struct xfs_alloc_rec_incore *low_rec, const struct xfs_alloc_rec_incore *high_rec, xfs_alloc_query_range_fn fn, void *priv) { union xfs_btree_irec low_brec = { .a = *low_rec }; union xfs_btree_irec high_brec = { .a = *high_rec }; struct xfs_alloc_query_range_info query = { .priv = priv, .fn = fn }; ASSERT(xfs_btree_is_bno(cur->bc_ops)); return xfs_btree_query_range(cur, &low_brec, &high_brec, xfs_alloc_query_range_helper, &query); } /* Find all free space records. */ int xfs_alloc_query_all( struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn, void *priv) { struct xfs_alloc_query_range_info query; ASSERT(xfs_btree_is_bno(cur->bc_ops)); query.priv = priv; query.fn = fn; return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query); } /* * Scan part of the keyspace of the free space and tell us if the area has no * records, is fully mapped by records, or is partially filled. */ int xfs_alloc_has_records( struct xfs_btree_cur *cur, xfs_agblock_t bno, xfs_extlen_t len, enum xbtree_recpacking *outcome) { union xfs_btree_irec low; union xfs_btree_irec high; memset(&low, 0, sizeof(low)); low.a.ar_startblock = bno; memset(&high, 0xFF, sizeof(high)); high.a.ar_startblock = bno + len - 1; return xfs_btree_has_records(cur, &low, &high, NULL, outcome); } /* * Walk all the blocks in the AGFL. The @walk_fn can return any negative * error code or XFS_ITER_*. */ int xfs_agfl_walk( struct xfs_mount *mp, struct xfs_agf *agf, struct xfs_buf *agflbp, xfs_agfl_walk_fn walk_fn, void *priv) { __be32 *agfl_bno; unsigned int i; int error; agfl_bno = xfs_buf_to_agfl_bno(agflbp); i = be32_to_cpu(agf->agf_flfirst); /* Nothing to walk in an empty AGFL. */ if (agf->agf_flcount == cpu_to_be32(0)) return 0; /* Otherwise, walk from first to last, wrapping as needed. */ for (;;) { error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv); if (error) return error; if (i == be32_to_cpu(agf->agf_fllast)) break; if (++i == xfs_agfl_size(mp)) i = 0; } return 0; } int __init xfs_extfree_intent_init_cache(void) { xfs_extfree_item_cache = kmem_cache_create("xfs_extfree_intent", sizeof(struct xfs_extent_free_item), 0, 0, NULL); return xfs_extfree_item_cache != NULL ? 0 : -ENOMEM; } void xfs_extfree_intent_destroy_cache(void) { kmem_cache_destroy(xfs_extfree_item_cache); xfs_extfree_item_cache = NULL; }
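The free list routines above (xfs_alloc_get_freelist, xfs_alloc_put_freelist and xfs_agfl_walk) all treat the AGFL as a fixed-size circular array: agf_flfirst indexes the next block to consume, agf_fllast the most recently added one, and both wrap at xfs_agfl_size(). The stand-alone sketch below models only that index arithmetic in user space; struct agfl_model, AGFL_SIZE and the main() harness are illustrative assumptions, not kernel code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define AGFL_SIZE 8			/* stand-in for xfs_agfl_size(mp) */

/* Simplified model of the AGFL indexes kept in the AGF. */
struct agfl_model {
	uint32_t flfirst;		/* index of the next slot to consume */
	uint32_t fllast;		/* index of the last slot filled */
	uint32_t flcount;		/* number of active slots */
	uint32_t bno[AGFL_SIZE];	/* free block numbers */
};

/* Mirrors xfs_alloc_get_freelist: consume the head entry, wrapping. */
static uint32_t agfl_get(struct agfl_model *a)
{
	uint32_t bno;

	assert(a->flcount > 0);
	bno = a->bno[a->flfirst];
	if (++a->flfirst == AGFL_SIZE)
		a->flfirst = 0;
	a->flcount--;
	return bno;
}

/* Mirrors xfs_alloc_put_freelist: append at the tail, wrapping. */
static void agfl_put(struct agfl_model *a, uint32_t bno)
{
	if (++a->fllast == AGFL_SIZE)
		a->fllast = 0;
	a->flcount++;
	assert(a->flcount <= AGFL_SIZE);
	a->bno[a->fllast] = bno;
}

/* Mirrors xfs_agfl_walk: visit every active slot from first to last. */
static void agfl_walk(const struct agfl_model *a)
{
	uint32_t i = a->flfirst;

	if (a->flcount == 0)
		return;
	for (;;) {
		printf("agfl slot %u -> block %u\n", i, a->bno[i]);
		if (i == a->fllast)
			break;
		if (++i == AGFL_SIZE)
			i = 0;
	}
}

int main(void)
{
	/* Empty list: the next put fills slot 0. */
	struct agfl_model a = { .flfirst = 0, .fllast = AGFL_SIZE - 1 };

	for (uint32_t b = 100; b < 106; b++)
		agfl_put(&a, b);
	agfl_get(&a);			/* consumes block 100 */
	agfl_walk(&a);			/* prints blocks 101..105 */
	return 0;
}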
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Anders K. Pedersen <akp@cohaesio.com>
 */

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/dst.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rt {
	enum nft_rt_keys	key:8;
	u8			dreg;
};

static u16 get_tcpmss(const struct nft_pktinfo *pkt,
		      const struct dst_entry *skbdst)
{
	u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst);
	const struct sk_buff *skb = pkt->skb;
	struct dst_entry *dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));

	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = ip_hdr(skb)->saddr;
		minlen = sizeof(struct iphdr) + sizeof(struct tcphdr);
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = ipv6_hdr(skb)->saddr;
		minlen = sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
		break;
	}

	nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt));
	if (dst) {
		mtu = min(mtu, dst_mtu(dst));
		dst_release(dst);
	}

	if (mtu <= minlen || mtu > 0xffff)
		return TCP_MSS_DEFAULT;

	return mtu - minlen;
}

void nft_rt_get_eval(const struct nft_expr *expr,
		     struct nft_regs *regs,
		     const struct nft_pktinfo *pkt)
{
	const struct nft_rt *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (!dst)
		goto err;

	switch (priv->key) {
#ifdef CONFIG_IP_ROUTE_CLASSID
	case NFT_RT_CLASSID:
		*dest = dst->tclassid;
		break;
#endif
	case NFT_RT_NEXTHOP4:
		if (nft_pf(pkt) != NFPROTO_IPV4)
			goto err;

		*dest = (__force u32)rt_nexthop(dst_rtable(dst),
						ip_hdr(skb)->daddr);
		break;
	case NFT_RT_NEXTHOP6:
		if (nft_pf(pkt) != NFPROTO_IPV6)
			goto err;

		memcpy(dest, rt6_nexthop(dst_rt6_info(dst),
					 &ipv6_hdr(skb)->daddr),
		       sizeof(struct in6_addr));
		break;
	case NFT_RT_TCPMSS:
		nft_reg_store16(dest, get_tcpmss(pkt, dst));
		break;
#ifdef CONFIG_XFRM
	case NFT_RT_XFRM:
		nft_reg_store8(dest, !!dst->xfrm);
		break;
#endif
	default:
		WARN_ON(1);
		goto err;
	}

	return;

err:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_rt_policy[NFTA_RT_MAX + 1] = {
	[NFTA_RT_DREG]		= { .type = NLA_U32 },
	[NFTA_RT_KEY]		= NLA_POLICY_MAX(NLA_BE32, 255),
};

static int nft_rt_get_init(const struct nft_ctx *ctx,
			   const struct nft_expr *expr,
			   const struct nlattr * const tb[])
{
	struct nft_rt *priv = nft_expr_priv(expr);
	unsigned int len;

	if (tb[NFTA_RT_KEY] == NULL ||
	    tb[NFTA_RT_DREG] == NULL)
		return -EINVAL;

	priv->key = ntohl(nla_get_be32(tb[NFTA_RT_KEY]));
	switch (priv->key) {
#ifdef CONFIG_IP_ROUTE_CLASSID
	case NFT_RT_CLASSID:
#endif
	case NFT_RT_NEXTHOP4:
		len = sizeof(u32);
		break;
	case NFT_RT_NEXTHOP6:
		len = sizeof(struct in6_addr);
		break;
	case NFT_RT_TCPMSS:
		len = sizeof(u16);
break; #ifdef CONFIG_XFRM case NFT_RT_XFRM: len = sizeof(u8); break; #endif default: return -EOPNOTSUPP; } return nft_parse_register_store(ctx, tb[NFTA_RT_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, len); } static int nft_rt_get_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_rt *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_RT_KEY, htonl(priv->key))) goto nla_put_failure; if (nft_dump_register(skb, NFTA_RT_DREG, priv->dreg)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_rt *priv = nft_expr_priv(expr); unsigned int hooks; if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && ctx->family != NFPROTO_INET) return -EOPNOTSUPP; switch (priv->key) { case NFT_RT_NEXTHOP4: case NFT_RT_NEXTHOP6: case NFT_RT_CLASSID: case NFT_RT_XFRM: return 0; case NFT_RT_TCPMSS: hooks = (1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING); break; default: return -EINVAL; } return nft_chain_validate_hooks(ctx->chain, hooks); } static const struct nft_expr_ops nft_rt_get_ops = { .type = &nft_rt_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_rt)), .eval = nft_rt_get_eval, .init = nft_rt_get_init, .dump = nft_rt_get_dump, .validate = nft_rt_validate, .reduce = NFT_REDUCE_READONLY, }; struct nft_expr_type nft_rt_type __read_mostly = { .name = "rt", .ops = &nft_rt_get_ops, .policy = nft_rt_policy, .maxattr = NFTA_RT_MAX, .owner = THIS_MODULE, };
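For the NFT_RT_TCPMSS key, get_tcpmss() above turns the route MTU into the MSS that can be advertised: it subtracts the minimal IP and TCP header lengths and falls back to TCP_MSS_DEFAULT when the MTU is no larger than those headers or does not fit in 16 bits. A minimal user-space sketch of just that clamping arithmetic, with the header sizes written out as literals (20-byte IPv4 and TCP headers, 40-byte IPv6 header) purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define TCP_MSS_DEFAULT	536	/* RFC 1122 fallback MSS */

/*
 * Model of the clamping done by get_tcpmss(): the stored MSS is the route
 * MTU minus the fixed IP + TCP header lengths, falling back to the default
 * when the MTU is implausible or would not fit in 16 bits.
 */
static uint16_t rt_tcpmss(uint32_t mtu, int ipv6)
{
	uint32_t minlen = ipv6 ? (40 + 20)	/* ipv6hdr + tcphdr */
			       : (20 + 20);	/* iphdr + tcphdr */

	if (mtu <= minlen || mtu > 0xffff)
		return TCP_MSS_DEFAULT;
	return mtu - minlen;
}

int main(void)
{
	printf("%u\n", (unsigned)rt_tcpmss(1500, 0));	/* 1460: IPv4 over Ethernet */
	printf("%u\n", (unsigned)rt_tcpmss(1500, 1));	/* 1440: IPv6 over Ethernet */
	printf("%u\n", (unsigned)rt_tcpmss(0, 0));	/* falls back to 536 */
	return 0;
}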
// SPDX-License-Identifier: GPL-2.0-or-later
/******************************************************************************
 * vlanproc.c	VLAN Module. /proc filesystem interface.
 *
 *		This module is completely hardware-independent and provides
 *		access to the router using Linux /proc filesystem.
 *
 * Author:	Ben Greear, <greearb@candelatech.com> copied from wanproc.c
 *		by: Gene Kozin <genek@compuserve.com>
 *
 * Copyright:	(c) 1998 Ben Greear
 *
 * ============================================================================
 * Jan 20, 1998        Ben Greear     Initial Version
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "vlanproc.h"
#include "vlan.h"

/****** Function Prototypes *************************************************/

/* Methods for preparing data for reading proc entries */
static int vlan_seq_show(struct seq_file *seq, void *v);
static void *vlan_seq_start(struct seq_file *seq, loff_t *pos);
static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos);
static void vlan_seq_stop(struct seq_file *seq, void *);
static int vlandev_seq_show(struct seq_file *seq, void *v);

/*
 * Global Data
 */

/*
 * Names of the proc directory entries
 */
static const char name_root[] = "vlan";
static const char name_conf[] = "config";

/*
 * Structures for interfacing with the /proc filesystem.
 * VLAN creates its own directory /proc/net/vlan with the following
 * entries:
 *	config		device status/configuration
 *	<device>	entry for each device
 */

/*
 * Generic /proc/net/vlan/<file> file and inode operations
 */

static const struct seq_operations vlan_seq_ops = {
	.start = vlan_seq_start,
	.next = vlan_seq_next,
	.stop = vlan_seq_stop,
	.show = vlan_seq_show,
};

/*
 * Proc filesystem directory entries.
*/ /* Strings */ static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = { [VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID", [VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD", [VLAN_NAME_TYPE_PLUS_VID] = "VLAN_NAME_TYPE_PLUS_VID", }; /* * Interface functions */ /* * Clean up /proc/net/vlan entries */ void vlan_proc_cleanup(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); if (vn->proc_vlan_conf) remove_proc_entry(name_conf, vn->proc_vlan_dir); if (vn->proc_vlan_dir) remove_proc_entry(name_root, net->proc_net); /* Dynamically added entries should be cleaned up as their vlan_device * is removed, so we should not have to take care of it here... */ } /* * Create /proc/net/vlan entries */ int __net_init vlan_proc_init(struct net *net) { struct vlan_net *vn = net_generic(net, vlan_net_id); vn->proc_vlan_dir = proc_net_mkdir(net, name_root, net->proc_net); if (!vn->proc_vlan_dir) goto err; vn->proc_vlan_conf = proc_create_net(name_conf, S_IFREG | 0600, vn->proc_vlan_dir, &vlan_seq_ops, sizeof(struct seq_net_private)); if (!vn->proc_vlan_conf) goto err; return 0; err: pr_err("can't create entry in proc filesystem!\n"); vlan_proc_cleanup(net); return -ENOBUFS; } /* * Add directory entry for VLAN device. */ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id); if (!strcmp(vlandev->name, name_conf)) return -EINVAL; vlan->dent = proc_create_single_data(vlandev->name, S_IFREG | 0600, vn->proc_vlan_dir, vlandev_seq_show, vlandev); if (!vlan->dent) return -ENOBUFS; return 0; } /* * Delete directory entry for VLAN device. */ void vlan_proc_rem_dev(struct net_device *vlandev) { /** NOTE: This will consume the memory pointed to by dent, it seems. */ proc_remove(vlan_dev_priv(vlandev)->dent); vlan_dev_priv(vlandev)->dent = NULL; } /****** Proc filesystem entry points ****************************************/ /* * The following few functions build the content of /proc/net/vlan/config */ static void *vlan_seq_from_index(struct seq_file *seq, loff_t *pos) { unsigned long ifindex = *pos; struct net_device *dev; for_each_netdev_dump(seq_file_net(seq), dev, ifindex) { if (!is_vlan_dev(dev)) continue; *pos = dev->ifindex; return dev; } return NULL; } static void *vlan_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; return vlan_seq_from_index(seq, pos); } static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return vlan_seq_from_index(seq, pos); } static void vlan_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { rcu_read_unlock(); } static int vlan_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); struct vlan_net *vn = net_generic(net, vlan_net_id); if (v == SEQ_START_TOKEN) { const char *nmtype = NULL; seq_puts(seq, "VLAN Dev name | VLAN ID\n"); if (vn->name_type < ARRAY_SIZE(vlan_name_type_str)) nmtype = vlan_name_type_str[vn->name_type]; seq_printf(seq, "Name-Type: %s\n", nmtype ? 
nmtype : "UNKNOWN"); } else { const struct net_device *vlandev = v; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); seq_printf(seq, "%-15s| %d | %s\n", vlandev->name, vlan->vlan_id, vlan->real_dev->name); } return 0; } static int vlandev_seq_show(struct seq_file *seq, void *offset) { struct net_device *vlandev = (struct net_device *) seq->private; const struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats; static const char fmt64[] = "%30s %12llu\n"; int i; if (!is_vlan_dev(vlandev)) return 0; stats = dev_get_stats(vlandev, &temp); seq_printf(seq, "%s VID: %d REORDER_HDR: %i dev->priv_flags: %x\n", vlandev->name, vlan->vlan_id, (int)(vlan->flags & 1), (u32)vlandev->priv_flags); seq_printf(seq, fmt64, "total frames received", stats->rx_packets); seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes); seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast); seq_puts(seq, "\n"); seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets); seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes); seq_printf(seq, "Device: %s", vlan->real_dev->name); /* now show all PRIORITY mappings relating to this VLAN */ seq_printf(seq, "\nINGRESS priority mappings: " "0:%u 1:%u 2:%u 3:%u 4:%u 5:%u 6:%u 7:%u\n", vlan->ingress_priority_map[0], vlan->ingress_priority_map[1], vlan->ingress_priority_map[2], vlan->ingress_priority_map[3], vlan->ingress_priority_map[4], vlan->ingress_priority_map[5], vlan->ingress_priority_map[6], vlan->ingress_priority_map[7]); seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = vlan->egress_priority_map[i]; while (mp) { seq_printf(seq, "%u:%d ", mp->priority, ((mp->vlan_qos >> 13) & 0x7)); mp = mp->next; } } seq_puts(seq, "\n"); return 0; }
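vlan_seq_show() above emits /proc/net/vlan/config as two header lines ("VLAN Dev name | VLAN ID" and "Name-Type: ...") followed by one "%-15s| %d | %s" line per VLAN device. A user-space sketch of a reader for that format; it assumes the proc file exists on the running system and that the layout matches the format strings shown above:

#include <stdio.h>

int main(void)
{
	char line[256], dev[32], real_dev[32];
	int vid, lineno = 0;
	FILE *f = fopen("/proc/net/vlan/config", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (lineno++ < 2)	/* skip the two header lines */
			continue;
		/* "%-15s| %d | %s" -> device name, VLAN id, real device */
		if (sscanf(line, "%31[^| ] | %d | %31s",
			   dev, &vid, real_dev) == 3)
			printf("%s is VLAN %d on %s\n", dev, vid, real_dev);
	}
	fclose(f);
	return 0;
}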
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
* * For highmem pages on 32bit systems this can be slow as the mapping space * is limited and protected by a global lock. In case that there is no * mapping slot available the function blocks until a slot is released via * kunmap(). */ static inline void *kmap(struct page *page); /** * kunmap - Unmap the virtual address mapped by kmap() * @page: Pointer to the page which was mapped by kmap() * * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of * pages in the low memory area. */ static inline void kunmap(struct page *page); /** * kmap_to_page - Get the page for a kmap'ed address * @addr: The address to look up * * Returns: The page which is mapped to @addr. */ static inline struct page *kmap_to_page(void *addr); /** * kmap_flush_unused - Flush all unused kmap mappings in order to * remove stray mappings */ static inline void kmap_flush_unused(void); /** * kmap_local_page - Map a page for temporary usage * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * Can be invoked from any context, including interrupts. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation: * * addr1 = kmap_local_page(page1); * addr2 = kmap_local_page(page2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While kmap_local_page() is significantly faster than kmap() for the highmem * case it comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_page() can rely on this side effect. */ static inline void *kmap_local_page(struct page *page); /** * kmap_local_folio - Map a page in this folio for temporary usage * @folio: The folio containing the page. * @offset: The byte offset within the folio which identifies the page. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of * the map operation:: * * addr1 = kmap_local_folio(folio1, offset1); * addr2 = kmap_local_folio(folio2, offset2); * ... * kunmap_local(addr2); * kunmap_local(addr1); * * Unmapping addr1 before addr2 is invalid and causes malfunction. * * Contrary to kmap() mappings the mapping is only valid in the context of * the caller and cannot be handed to other contexts. * * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the * virtual address of the direct mapping. Only real highmem pages are * temporarily mapped. * * While it is significantly faster than kmap() for the highmem case it * comes with restrictions about the pointer validity. * * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across * preemption. No caller of kmap_local_folio() can rely on this side effect. * * Context: Can be invoked from any context. * Return: The virtual address of @offset. 
*/ static inline void *kmap_local_folio(struct folio *folio, size_t offset); /** * kmap_atomic - Atomically map a page for temporary usage - Deprecated! * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * * In fact a wrapper around kmap_local_page() which also disables pagefaults * and, depending on PREEMPT_RT configuration, also CPU migration and * preemption. Therefore users should not count on the latter two side effects. * * Mappings should always be released by kunmap_atomic(). * * Do not use in new code. Use kmap_local_page() instead. * * It is used in atomic context when code wants to access the contents of a * page that might be allocated from high memory (see __GFP_HIGHMEM), for * example a page in the pagecache. The API has two functions, and they * can be used in a manner similar to the following:: * * // Find the page of interest. * struct page *page = find_get_page(mapping, offset); * * // Gain access to the contents of that page. * void *vaddr = kmap_atomic(page); * * // Do something to the contents of that page. * memset(vaddr, 0, PAGE_SIZE); * * // Unmap that page. * kunmap_atomic(vaddr); * * Note that the kunmap_atomic() call takes the result of the kmap_atomic() * call, not the argument. * * If you need to map two pages because you want to copy from one page to * another you need to keep the kmap_atomic calls strictly nested, like: * * vaddr1 = kmap_atomic(page1); * vaddr2 = kmap_atomic(page2); * * memcpy(vaddr1, vaddr2, PAGE_SIZE); * * kunmap_atomic(vaddr2); * kunmap_atomic(vaddr1); */ static inline void *kmap_atomic(struct page *page); /* Highmem related interfaces for management code */ static inline unsigned long nr_free_highpages(void); static inline unsigned long totalhigh_pages(void); #ifndef ARCH_HAS_FLUSH_ANON_PAGE static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { } #endif #ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE static inline void flush_kernel_vmap_range(void *vaddr, int size) { } static inline void invalidate_kernel_vmap_range(void *vaddr, int size) { } #endif /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { void *addr = kmap_local_page(page); clear_user_page(addr, vaddr, page); kunmap_local(addr); } #endif #ifndef vma_alloc_zeroed_movable_folio /** * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA. * @vma: The VMA the page is to be allocated for. * @vaddr: The virtual address the page will be inserted into. * * This function will allocate a page suitable for inserting into this * VMA at this virtual address. It may be allocated from highmem or * the movable zone. An architecture may provide its own implementation. * * Return: A folio containing one allocated and zeroed page or NULL if * we are out of memory. 
*/ static inline struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr) { struct folio *folio; folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr); if (folio && user_alloc_needs_zeroing()) clear_user_highpage(&folio->page, vaddr); return folio; } #endif static inline void clear_highpage(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kaddr); kunmap_local(kaddr); } static inline void clear_highpage_kasan_tagged(struct page *page) { void *kaddr = kmap_local_page(page); clear_page(kasan_reset_tag(kaddr)); kunmap_local(kaddr); } #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE static inline void tag_clear_highpage(struct page *page) { } #endif /* * If we pass in a base or tail page, we can zero up to PAGE_SIZE. * If we pass in a head page, we can zero up to the size of the compound page. */ #ifdef CONFIG_HIGHMEM void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2); #else static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { void *kaddr = kmap_local_page(page); unsigned int i; BUG_ON(end1 > page_size(page) || end2 > page_size(page)); if (end1 > start1) memset(kaddr + start1, 0, end1 - start1); if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); kunmap_local(kaddr); for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } #endif static inline void zero_user_segment(struct page *page, unsigned start, unsigned end) { zero_user_segments(page, start, end, 0, 0); } static inline void zero_user(struct page *page, unsigned start, unsigned size) { zero_user_segments(page, start, start + size, 0, 0); } #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_user_page(vto, vfrom, vaddr, to); kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); copy_page(vto, vfrom); kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); } #endif #ifdef copy_mc_to_kernel /* * If architecture supports machine check exception handling, define the * #MC versions of copy_user_highpage and copy_highpage. They copy a memory * page with #MC in source page (@from) handled, and return the number * of bytes not copied if there was a #MC, otherwise 0 for success. 
*/ static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_unpoison_memory(page_address(to), PAGE_SIZE); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } static inline int copy_mc_highpage(struct page *to, struct page *from) { unsigned long ret; char *vfrom, *vto; vfrom = kmap_local_page(from); vto = kmap_local_page(to); ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); if (!ret) kmsan_copy_page_meta(to, from); kunmap_local(vto); kunmap_local(vfrom); if (ret) memory_failure_queue(page_to_pfn(from), 0); return ret; } #else static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, size_t len) { char *dst = kmap_local_page(dst_page); char *src = kmap_local_page(src_page); VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); memcpy(dst + dst_off, src + src_off, len); kunmap_local(src); kunmap_local(dst); } static inline void memset_page(struct page *page, size_t offset, int val, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, val, len); kunmap_local(addr); } static inline void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len) { char *from = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to, from + offset, len); kunmap_local(from); } static inline void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) { char *to = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { char *addr = kmap_local_page(page); VM_BUG_ON(offset + len > PAGE_SIZE); memset(addr + offset, 0, len); flush_dcache_page(page); kunmap_local(addr); } /** * memcpy_from_folio - Copy a range of bytes from a folio. * @to: The memory to copy to. * @folio: The folio to read from. * @offset: The first byte in the folio to read. * @len: The number of bytes to copy. */ static inline void memcpy_from_folio(char *to, struct folio *folio, size_t offset, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { const char *from = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_highmem(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(from); to += chunk; offset += chunk; len -= chunk; } while (len > 0); } /** * memcpy_to_folio - Copy a range of bytes to a folio. * @folio: The folio to write to. * @offset: The first byte in the folio to store to. * @from: The memory to copy from. * @len: The number of bytes to copy. 
*/ static inline void memcpy_to_folio(struct folio *folio, size_t offset, const char *from, size_t len) { VM_BUG_ON(offset + len > folio_size(folio)); do { char *to = kmap_local_folio(folio, offset); size_t chunk = len; if (folio_test_highmem(folio) && chunk > PAGE_SIZE - offset_in_page(offset)) chunk = PAGE_SIZE - offset_in_page(offset); memcpy(to, from, chunk); kunmap_local(to); from += chunk; offset += chunk; len -= chunk; } while (len > 0); flush_dcache_folio(folio); } /** * folio_zero_tail - Zero the tail of a folio. * @folio: The folio to zero. * @offset: The byte offset in the folio to start zeroing at. * @kaddr: The address the folio is currently mapped to. * * If you have already used kmap_local_folio() to map a folio, written * some data to it and now need to zero the end of the folio (and flush * the dcache), you can use this function. If you do not have the * folio kmapped (eg the folio has been partially populated by DMA), * use folio_zero_range() or folio_zero_segment() instead. * * Return: An address which can be passed to kunmap_local(). */ static inline __must_check void *folio_zero_tail(struct folio *folio, size_t offset, void *kaddr) { size_t len = folio_size(folio) - offset; if (folio_test_highmem(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memset(kaddr, 0, max); kunmap_local(kaddr); len -= max; offset += max; max = PAGE_SIZE; kaddr = kmap_local_folio(folio, offset); } } memset(kaddr, 0, len); flush_dcache_folio(folio); return kaddr; } /** * folio_fill_tail - Copy some data to a folio and pad with zeroes. * @folio: The destination folio. * @offset: The offset into @folio at which to start copying. * @from: The data to copy. * @len: How many bytes of data to copy. * * This function is most useful for filesystems which support inline data. * When they want to copy data from the inode into the page cache, this * function does everything for them. It supports large folios even on * HIGHMEM configurations. */ static inline void folio_fill_tail(struct folio *folio, size_t offset, const char *from, size_t len) { char *to = kmap_local_folio(folio, offset); VM_BUG_ON(offset + len > folio_size(folio)); if (folio_test_highmem(folio)) { size_t max = PAGE_SIZE - offset_in_page(offset); while (len > max) { memcpy(to, from, max); kunmap_local(to); len -= max; from += max; offset += max; max = PAGE_SIZE; to = kmap_local_folio(folio, offset); } } memcpy(to, from, len); to = folio_zero_tail(folio, offset + len, to + len); kunmap_local(to); } /** * memcpy_from_file_folio - Copy some bytes from a file folio. * @to: The destination buffer. * @folio: The folio to copy from. * @pos: The position in the file. * @len: The maximum number of bytes to copy. * * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE * if the folio comes from HIGHMEM, and by the size of the folio. * * Return: The number of bytes copied from the folio. */ static inline size_t memcpy_from_file_folio(char *to, struct folio *folio, loff_t pos, size_t len) { size_t offset = offset_in_folio(folio, pos); char *from = kmap_local_folio(folio, offset); if (folio_test_highmem(folio)) { offset = offset_in_page(offset); len = min_t(size_t, len, PAGE_SIZE - offset); } else len = min(len, folio_size(folio) - offset); memcpy(to, from, len); kunmap_local(from); return len; } /** * folio_zero_segments() - Zero two byte ranges in a folio. * @folio: The folio to write to. * @start1: The first byte to zero. * @xend1: One more than the last byte in the first range. 
* @start2: The first byte to zero in the second range. * @xend2: One more than the last byte in the second range. */ static inline void folio_zero_segments(struct folio *folio, size_t start1, size_t xend1, size_t start2, size_t xend2) { zero_user_segments(&folio->page, start1, xend1, start2, xend2); } /** * folio_zero_segment() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @xend: One more than the last byte to zero. */ static inline void folio_zero_segment(struct folio *folio, size_t start, size_t xend) { zero_user_segments(&folio->page, start, xend, 0, 0); } /** * folio_zero_range() - Zero a byte range in a folio. * @folio: The folio to write to. * @start: The first byte to zero. * @length: The number of bytes to zero. */ static inline void folio_zero_range(struct folio *folio, size_t start, size_t length) { zero_user_segments(&folio->page, start, start + length, 0, 0); } /** * folio_release_kmap - Unmap a folio and drop a refcount. * @folio: The folio to release. * @addr: The address previously returned by a call to kmap_local_folio(). * * It is common, eg in directory handling to kmap a folio. This function * unmaps the folio and drops the refcount that was being held to keep the * folio alive while we accessed it. */ static inline void folio_release_kmap(struct folio *folio, void *addr) { kunmap_local(addr); folio_put(folio); } static inline void unmap_and_put_page(struct page *page, void *addr) { folio_release_kmap(page_folio(page), addr); } #endif /* _LINUX_HIGHMEM_H */
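The kmap_local_page() documentation above requires that nested local mappings be released in the reverse order of mapping. A usage sketch under that rule; pages_equal_demo() is a hypothetical helper (it assumes kernel context with <linux/highmem.h> and <linux/string.h> available), not part of this header:

/* Compare two pages using nested local mappings, unmapping in reverse order. */
static inline bool pages_equal_demo(struct page *a, struct page *b)
{
	char *va = kmap_local_page(a);
	char *vb = kmap_local_page(b);
	bool same = !memcmp(va, vb, PAGE_SIZE);

	kunmap_local(vb);	/* mapped last, so unmapped first */
	kunmap_local(va);
	return same;
}

Swapping the two kunmap_local() calls would violate the stack discipline described for kmap_local_page() and kmap_atomic() above.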
5 1 3 9 9 2 9 1 3 4 3 2 2 2 5 8 1 4 3 2 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 // SPDX-License-Identifier: GPL-2.0-only /* fs/fat/nfs.c */ #include <linux/exportfs.h> #include "fat.h" struct fat_fid { u32 i_gen; u32 i_pos_low; u16 i_pos_hi; u16 parent_i_pos_hi; u32 parent_i_pos_low; u32 parent_i_gen; }; #define FAT_FID_SIZE_WITHOUT_PARENT 3 #define FAT_FID_SIZE_WITH_PARENT (sizeof(struct fat_fid)/sizeof(u32)) /* * Look up a directory inode given its starting cluster. */ static struct inode *fat_dget(struct super_block *sb, int i_logstart) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct hlist_head *head; struct msdos_inode_info *i; struct inode *inode = NULL; head = sbi->dir_hashtable + fat_dir_hash(i_logstart); spin_lock(&sbi->dir_hash_lock); hlist_for_each_entry(i, head, i_dir_hash) { BUG_ON(i->vfs_inode.i_sb != sb); if (i->i_logstart != i_logstart) continue; inode = igrab(&i->vfs_inode); if (inode) break; } spin_unlock(&sbi->dir_hash_lock); return inode; } static struct inode *fat_ilookup(struct super_block *sb, u64 ino, loff_t i_pos) { if (MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) return fat_iget(sb, i_pos); else { if ((ino < MSDOS_ROOT_INO) || (ino == MSDOS_FSINFO_INO)) return NULL; return ilookup(sb, ino); } } static struct inode *__fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation, loff_t i_pos) { struct inode *inode = fat_ilookup(sb, ino, i_pos); if (inode && generation && (inode->i_generation != generation)) { iput(inode); inode = NULL; } if (inode == NULL && MSDOS_SB(sb)->options.nfs == FAT_NFS_NOSTALE_RO) { struct buffer_head *bh = NULL; struct msdos_dir_entry *de ; sector_t blocknr; int offset; fat_get_blknr_offset(MSDOS_SB(sb), i_pos, &blocknr, &offset); bh = sb_bread(sb, blocknr); if (!bh) { fat_msg(sb, KERN_ERR, "unable to read block(%llu) for building NFS inode", (llu)blocknr); return inode; } de = (struct msdos_dir_entry *)bh->b_data; /* If a file is deleted on server and client is not updated * yet, we must not build the inode upon a lookup call. 
*/ if (IS_FREE(de[offset].name)) inode = NULL; else inode = fat_build_inode(sb, &de[offset], i_pos); brelse(bh); } return inode; } static struct inode *fat_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { return __fat_nfs_get_inode(sb, ino, generation, 0); } static int fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent) { int len = *lenp; struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); struct fat_fid *fid = (struct fat_fid *) fh; loff_t i_pos; int type = FILEID_FAT_WITHOUT_PARENT; if (parent) { if (len < FAT_FID_SIZE_WITH_PARENT) { *lenp = FAT_FID_SIZE_WITH_PARENT; return FILEID_INVALID; } } else { if (len < FAT_FID_SIZE_WITHOUT_PARENT) { *lenp = FAT_FID_SIZE_WITHOUT_PARENT; return FILEID_INVALID; } } i_pos = fat_i_pos_read(sbi, inode); *lenp = FAT_FID_SIZE_WITHOUT_PARENT; fid->i_gen = inode->i_generation; fid->i_pos_low = i_pos & 0xFFFFFFFF; fid->i_pos_hi = (i_pos >> 32) & 0xFFFF; if (parent) { i_pos = fat_i_pos_read(sbi, parent); fid->parent_i_pos_hi = (i_pos >> 32) & 0xFFFF; fid->parent_i_pos_low = i_pos & 0xFFFFFFFF; fid->parent_i_gen = parent->i_generation; type = FILEID_FAT_WITH_PARENT; *lenp = FAT_FID_SIZE_WITH_PARENT; } else { /* * We need to initialize this field because the fh is actually * 12 bytes long */ fid->parent_i_pos_hi = 0; } return type; } /* * Map a NFS file handle to a corresponding dentry. * The dentry may or may not be connected to the filesystem root. */ static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_dentry_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; switch (fh_type) { case FILEID_FAT_WITHOUT_PARENT: if (fh_len < FAT_FID_SIZE_WITHOUT_PARENT) return NULL; break; case FILEID_FAT_WITH_PARENT: if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; break; default: return NULL; } i_pos = fid->i_pos_hi; i_pos = (i_pos << 32) | (fid->i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->i_gen, i_pos); return d_obtain_alias(inode); } /* * Find the parent for a file specified by NFS handle. * This requires that the handle contain the i_ino of the parent. 
*/ static struct dentry *fat_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, fat_nfs_get_inode); } static struct dentry *fat_fh_to_parent_nostale(struct super_block *sb, struct fid *fh, int fh_len, int fh_type) { struct inode *inode = NULL; struct fat_fid *fid = (struct fat_fid *)fh; loff_t i_pos; if (fh_len < FAT_FID_SIZE_WITH_PARENT) return NULL; switch (fh_type) { case FILEID_FAT_WITH_PARENT: i_pos = fid->parent_i_pos_hi; i_pos = (i_pos << 32) | (fid->parent_i_pos_low); inode = __fat_nfs_get_inode(sb, 0, fid->parent_i_gen, i_pos); break; } return d_obtain_alias(inode); } /* * Rebuild the parent for a directory that is not connected * to the filesystem root */ static struct inode *fat_rebuild_parent(struct super_block *sb, int parent_logstart) { int search_clus, clus_to_match; struct msdos_dir_entry *de; struct inode *parent = NULL; struct inode *dummy_grand_parent = NULL; struct fat_slot_info sinfo; struct msdos_sb_info *sbi = MSDOS_SB(sb); sector_t blknr = fat_clus_to_blknr(sbi, parent_logstart); struct buffer_head *parent_bh = sb_bread(sb, blknr); if (!parent_bh) { fat_msg(sb, KERN_ERR, "unable to read cluster of parent directory"); return NULL; } de = (struct msdos_dir_entry *) parent_bh->b_data; clus_to_match = fat_get_start(sbi, &de[0]); search_clus = fat_get_start(sbi, &de[1]); dummy_grand_parent = fat_dget(sb, search_clus); if (!dummy_grand_parent) { dummy_grand_parent = new_inode(sb); if (!dummy_grand_parent) { brelse(parent_bh); return parent; } dummy_grand_parent->i_ino = iunique(sb, MSDOS_ROOT_INO); fat_fill_inode(dummy_grand_parent, &de[1]); MSDOS_I(dummy_grand_parent)->i_pos = -1; } if (!fat_scan_logstart(dummy_grand_parent, clus_to_match, &sinfo)) parent = fat_build_inode(sb, sinfo.de, sinfo.i_pos); brelse(parent_bh); iput(dummy_grand_parent); return parent; } /* * Find the parent for a directory that is not currently connected to * the filesystem root. * * On entry, the caller holds d_inode(child_dir)->i_mutex. */ static struct dentry *fat_get_parent(struct dentry *child_dir) { struct super_block *sb = child_dir->d_sb; struct buffer_head *bh = NULL; struct msdos_dir_entry *de; struct inode *parent_inode = NULL; struct msdos_sb_info *sbi = MSDOS_SB(sb); if (!fat_get_dotdot_entry(d_inode(child_dir), &bh, &de)) { int parent_logstart = fat_get_start(sbi, de); parent_inode = fat_dget(sb, parent_logstart); if (!parent_inode && sbi->options.nfs == FAT_NFS_NOSTALE_RO) parent_inode = fat_rebuild_parent(sb, parent_logstart); } brelse(bh); return d_obtain_alias(parent_inode); } const struct export_operations fat_export_ops = { .encode_fh = generic_encode_ino32_fh, .fh_to_dentry = fat_fh_to_dentry, .fh_to_parent = fat_fh_to_parent, .get_parent = fat_get_parent, }; const struct export_operations fat_export_ops_nostale = { .encode_fh = fat_encode_fh_nostale, .fh_to_dentry = fat_fh_to_dentry_nostale, .fh_to_parent = fat_fh_to_parent_nostale, .get_parent = fat_get_parent, };
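/*
 * Illustrative user-space sketch (not part of fs/fat/nfs.c): the nostale
 * file handles above split the 64-bit directory-entry position i_pos into a
 * 32-bit low word and a 16-bit high word, and fat_fh_to_dentry_nostale()
 * reassembles it.  This standalone demo mirrors that round trip with an
 * arbitrary sample value.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i_pos = 0x123456789abcULL;	/* sample 48-bit position */

	/* encode, as in fat_encode_fh_nostale() */
	uint32_t i_pos_low = i_pos & 0xFFFFFFFF;
	uint16_t i_pos_hi  = (i_pos >> 32) & 0xFFFF;

	/* decode, as in fat_fh_to_dentry_nostale() */
	uint64_t decoded = ((uint64_t)i_pos_hi << 32) | i_pos_low;

	assert(decoded == i_pos);
	printf("i_pos=%#llx -> low=%#x hi=%#x -> %#llx\n",
	       (unsigned long long)i_pos, (unsigned int)i_pos_low,
	       (unsigned int)i_pos_hi, (unsigned long long)decoded);
	return 0;
}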
5 4 3 2 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_SCHED_RED_H #define __NET_SCHED_RED_H #include <linux/types.h> #include <linux/bug.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/dsfield.h> #include <linux/reciprocal_div.h> /* Random Early Detection (RED) algorithm. ======================================= Source: Sally Floyd and Van Jacobson, "Random Early Detection Gateways for Congestion Avoidance", 1993, IEEE/ACM Transactions on Networking. This file codes a "divisionless" version of RED algorithm as written down in Fig.17 of the paper. Short description. ------------------ When a new packet arrives we calculate the average queue length: avg = (1-W)*avg + W*current_queue_len, W is the filter time constant (chosen as 2^(-Wlog)), it controls the inertia of the algorithm. To allow larger bursts, W should be decreased. if (avg > th_max) -> packet marked (dropped). if (avg < th_min) -> packet passes. if (th_min < avg < th_max) we calculate probability: Pb = max_P * (avg - th_min)/(th_max-th_min) and mark (drop) packet with this probability. Pb changes from 0 (at avg==th_min) to max_P (avg==th_max). max_P should be small (not 1), usually 0.01..0.02 is good value. max_P is chosen as a number, so that max_P/(th_max-th_min) is a negative power of two in order arithmetic to contain only shifts. Parameters, settable by user: ----------------------------- qth_min - bytes (should be < qth_max/2) qth_max - bytes (should be at least 2*qth_min and less limit) Wlog - bits (<32) log(1/W). Plog - bits (<32) Plog is related to max_P by formula: max_P = (qth_max-qth_min)/2^Plog; F.e. if qth_max=128K and qth_min=32K, then Plog=22 corresponds to max_P=0.02 Scell_log Stab Lookup table for log((1-W)^(t/t_ave). NOTES: Upper bound on W. 
----------------- If you want to allow bursts of L packets of size S, you should choose W: L + 1 - th_min/S < (1-(1-W)^L)/W th_min/S = 32 th_min/S = 4 log(W) L -1 33 -2 35 -3 39 -4 46 -5 57 -6 75 -7 101 -8 135 -9 190 etc. */ /* * Adaptative RED : An Algorithm for Increasing the Robustness of RED's AQM * (Sally FLoyd, Ramakrishna Gummadi, and Scott Shenker) August 2001 * * Every 500 ms: * if (avg > target and max_p <= 0.5) * increase max_p : max_p += alpha; * else if (avg < target and max_p >= 0.01) * decrease max_p : max_p *= beta; * * target :[qth_min + 0.4*(qth_min - qth_max), * qth_min + 0.6*(qth_min - qth_max)]. * alpha : min(0.01, max_p / 4) * beta : 0.9 * max_P is a Q0.32 fixed point number (with 32 bits mantissa) * max_P between 0.01 and 0.5 (1% - 50%) [ Its no longer a negative power of two ] */ #define RED_ONE_PERCENT ((u32)DIV_ROUND_CLOSEST(1ULL<<32, 100)) #define MAX_P_MIN (1 * RED_ONE_PERCENT) #define MAX_P_MAX (50 * RED_ONE_PERCENT) #define MAX_P_ALPHA(val) min(MAX_P_MIN, val / 4) #define RED_STAB_SIZE 256 #define RED_STAB_MASK (RED_STAB_SIZE - 1) struct red_stats { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ u32 forced_drop; /* Forced drops, qavg > max_thresh */ u32 forced_mark; /* Forced marks, qavg > max_thresh */ u32 pdrop; /* Drops due to queue limits */ }; struct red_parms { /* Parameters */ u32 qth_min; /* Min avg length threshold: Wlog scaled */ u32 qth_max; /* Max avg length threshold: Wlog scaled */ u32 Scell_max; u32 max_P; /* probability, [0 .. 1.0] 32 scaled */ /* reciprocal_value(max_P / qth_delta) */ struct reciprocal_value max_P_reciprocal; u32 qth_delta; /* max_th - min_th */ u32 target_min; /* min_th + 0.4*(max_th - min_th) */ u32 target_max; /* min_th + 0.6*(max_th - min_th) */ u8 Scell_log; u8 Wlog; /* log(W) */ u8 Plog; /* random number bits */ u8 Stab[RED_STAB_SIZE]; }; struct red_vars { /* Variables */ int qcount; /* Number of packets since last random number generation */ u32 qR; /* Cached random number */ unsigned long qavg; /* Average queue length: Wlog scaled */ ktime_t qidlestart; /* Start of current idle period */ }; static inline u32 red_maxp(u8 Plog) { return Plog < 32 ? (~0U >> Plog) : ~0U; } static inline void red_set_vars(struct red_vars *v) { /* Reset average queue length, the value is strictly bound * to the parameters below, resetting hurts a bit but leaving * it might result in an unreasonable qavg for a while. 
--TGR */ v->qavg = 0; v->qcount = -1; } static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log, u8 *stab) { if (fls(qth_min) + Wlog >= 32) return false; if (fls(qth_max) + Wlog >= 32) return false; if (Scell_log >= 32) return false; if (qth_max < qth_min) return false; if (stab) { int i; for (i = 0; i < RED_STAB_SIZE; i++) if (stab[i] >= 32) return false; } return true; } static inline int red_get_flags(unsigned char qopt_flags, unsigned char historic_mask, struct nlattr *flags_attr, unsigned char supported_mask, struct nla_bitfield32 *p_flags, unsigned char *p_userbits, struct netlink_ext_ack *extack) { struct nla_bitfield32 flags; if (qopt_flags && flags_attr) { NL_SET_ERR_MSG_MOD(extack, "flags should be passed either through qopt, or through a dedicated attribute"); return -EINVAL; } if (flags_attr) { flags = nla_get_bitfield32(flags_attr); } else { flags.selector = historic_mask; flags.value = qopt_flags & historic_mask; } *p_flags = flags; *p_userbits = qopt_flags & ~historic_mask; return 0; } static inline int red_validate_flags(unsigned char flags, struct netlink_ext_ack *extack) { if ((flags & TC_RED_NODROP) && !(flags & TC_RED_ECN)) { NL_SET_ERR_MSG_MOD(extack, "nodrop mode is only meaningful with ECN"); return -EINVAL; } return 0; } static inline void red_set_parms(struct red_parms *p, u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog, u8 Scell_log, u8 *stab, u32 max_P) { int delta = qth_max - qth_min; u32 max_p_delta; WRITE_ONCE(p->qth_min, qth_min << Wlog); WRITE_ONCE(p->qth_max, qth_max << Wlog); WRITE_ONCE(p->Wlog, Wlog); WRITE_ONCE(p->Plog, Plog); if (delta <= 0) delta = 1; p->qth_delta = delta; if (!max_P) { max_P = red_maxp(Plog); max_P *= delta; /* max_P = (qth_max - qth_min)/2^Plog */ } WRITE_ONCE(p->max_P, max_P); max_p_delta = max_P / delta; max_p_delta = max(max_p_delta, 1U); p->max_P_reciprocal = reciprocal_value(max_p_delta); /* RED Adaptative target : * [min_th + 0.4*(min_th - max_th), * min_th + 0.6*(min_th - max_th)]. */ delta /= 5; p->target_min = qth_min + 2*delta; p->target_max = qth_min + 3*delta; WRITE_ONCE(p->Scell_log, Scell_log); p->Scell_max = (255 << Scell_log); if (stab) memcpy(p->Stab, stab, sizeof(p->Stab)); } static inline int red_is_idling(const struct red_vars *v) { return v->qidlestart != 0; } static inline void red_start_of_idle_period(struct red_vars *v) { v->qidlestart = ktime_get(); } static inline void red_end_of_idle_period(struct red_vars *v) { v->qidlestart = 0; } static inline void red_restart(struct red_vars *v) { red_end_of_idle_period(v); v->qavg = 0; v->qcount = -1; } static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms *p, const struct red_vars *v) { s64 delta = ktime_us_delta(ktime_get(), v->qidlestart); long us_idle = min_t(s64, delta, p->Scell_max); int shift; /* * The problem: ideally, average length queue recalculation should * be done over constant clock intervals. This is too expensive, so * that the calculation is driven by outgoing packets. * When the queue is idle we have to model this clock by hand. * * SF+VJ proposed to "generate": * * m = idletime / (average_pkt_size / bandwidth) * * dummy packets as a burst after idle time, i.e. * * v->qavg *= (1-W)^m * * This is an apparently overcomplicated solution (f.e. we have to * precompute a table to make this calculation in reasonable time) * I believe that a simpler model may be used here, * but it is field for experiments. 
*/ shift = p->Stab[(us_idle >> p->Scell_log) & RED_STAB_MASK]; if (shift) return v->qavg >> shift; else { /* Approximate initial part of exponent with linear function: * * (1-W)^m ~= 1-mW + ... * * Seems, it is the best solution to * problem of too coarse exponent tabulation. */ us_idle = (v->qavg * (u64)us_idle) >> p->Scell_log; if (us_idle < (v->qavg >> 1)) return v->qavg - us_idle; else return v->qavg >> 1; } } static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p, const struct red_vars *v, unsigned int backlog) { /* * NOTE: v->qavg is fixed point number with point at Wlog. * The formula below is equivalent to floating point * version: * * qavg = qavg*(1-W) + backlog*W; * * --ANK (980924) */ return v->qavg + (backlog - (v->qavg >> p->Wlog)); } static inline unsigned long red_calc_qavg(const struct red_parms *p, const struct red_vars *v, unsigned int backlog) { if (!red_is_idling(v)) return red_calc_qavg_no_idle_time(p, v, backlog); else return red_calc_qavg_from_idle_time(p, v); } static inline u32 red_random(const struct red_parms *p) { return reciprocal_divide(get_random_u32(), p->max_P_reciprocal); } static inline int red_mark_probability(const struct red_parms *p, const struct red_vars *v, unsigned long qavg) { /* The formula used below causes questions. OK. qR is random number in the interval (0..1/max_P)*(qth_max-qth_min) i.e. 0..(2^Plog). If we used floating point arithmetic, it would be: (2^Plog)*rnd_num, where rnd_num is less 1. Taking into account, that qavg have fixed point at Wlog, two lines below have the following floating point equivalent: max_P*(qavg - qth_min)/(qth_max-qth_min) < rnd/qcount Any questions? --ANK (980924) */ return !(((qavg - p->qth_min) >> p->Wlog) * v->qcount < v->qR); } enum { RED_BELOW_MIN_THRESH, RED_BETWEEN_TRESH, RED_ABOVE_MAX_TRESH, }; static inline int red_cmp_thresh(const struct red_parms *p, unsigned long qavg) { if (qavg < p->qth_min) return RED_BELOW_MIN_THRESH; else if (qavg >= p->qth_max) return RED_ABOVE_MAX_TRESH; else return RED_BETWEEN_TRESH; } enum { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK, }; static inline int red_action(const struct red_parms *p, struct red_vars *v, unsigned long qavg) { switch (red_cmp_thresh(p, qavg)) { case RED_BELOW_MIN_THRESH: v->qcount = -1; return RED_DONT_MARK; case RED_BETWEEN_TRESH: if (++v->qcount) { if (red_mark_probability(p, v, qavg)) { v->qcount = 0; v->qR = red_random(p); return RED_PROB_MARK; } } else v->qR = red_random(p); return RED_DONT_MARK; case RED_ABOVE_MAX_TRESH: v->qcount = -1; return RED_HARD_MARK; } BUG(); return RED_DONT_MARK; } static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) { unsigned long qavg; u32 max_p_delta; qavg = v->qavg; if (red_is_idling(v)) qavg = red_calc_qavg_from_idle_time(p, v); /* v->qavg is fixed point number with point at Wlog */ qavg >>= p->Wlog; if (qavg > p->target_max && p->max_P <= MAX_P_MAX) p->max_P += MAX_P_ALPHA(p->max_P); /* maxp = maxp + alpha */ else if (qavg < p->target_min && p->max_P >= MAX_P_MIN) p->max_P = (p->max_P/10)*9; /* maxp = maxp * Beta */ max_p_delta = DIV_ROUND_CLOSEST(p->max_P, p->qth_delta); max_p_delta = max(max_p_delta, 1U); p->max_P_reciprocal = reciprocal_value(max_p_delta); } #endif
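/*
 * Illustrative user-space sketch (not part of net/red.h): the fixed-point
 * average used by red_calc_qavg_no_idle_time() above keeps its binary point
 * at Wlog, so "qavg += backlog - (qavg >> Wlog)" implements
 * qavg = qavg*(1-W) + backlog*W with W = 2^(-Wlog).  The parameter values
 * below are arbitrary and only for demonstration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long qavg = 0;			/* scaled by 2^Wlog */
	const unsigned int Wlog = 9;		/* W = 1/512 */
	const unsigned int backlog = 64000;	/* instantaneous queue, bytes */

	for (int i = 0; i < 2000; i++)
		qavg += backlog - (qavg >> Wlog);

	/* shift the binary point away to recover bytes */
	printf("average queue ~ %lu bytes (converging toward %u)\n",
	       qavg >> Wlog, backlog);
	return 0;
}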
3162 3158 3161 3162 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 // SPDX-License-Identifier: GPL-2.0-or-later /* * The "hash function" used as the core of the ChaCha stream cipher (RFC7539) * * Copyright (C) 2015 Martin Willi */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/unaligned.h> #include <crypto/chacha.h> static void chacha_permute(u32 *x, int nrounds) { int i; /* whitelist the allowed round counts */ WARN_ON_ONCE(nrounds != 20 && nrounds != 12); for (i = 0; i < nrounds; i += 2) { x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16); x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12); x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12); x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12); x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12); x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8); x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8); x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8); x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8); x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7); x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7); x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7); x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7); x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16); x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16); x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16); x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16); x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12); x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12); x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12); x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12); x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8); x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8); x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8); x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8); x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7); x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7); x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); } } /** * chacha_block_generic - generate one keystream block and increment block counter * @state: input state matrix (16 32-bit words) * @stream: output keystream block (64 bytes) * @nrounds: number of rounds (20 or 12; 20 is recommended) * * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. * The caller has already converted the endianness of the input. This function * also handles incrementing the block counter in the input matrix. */ void chacha_block_generic(u32 *state, u8 *stream, int nrounds) { u32 x[16]; int i; memcpy(x, state, 64); chacha_permute(x, nrounds); for (i = 0; i < ARRAY_SIZE(x); i++) put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); state[12]++; } EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha * @state: input state matrix (16 32-bit words) * @stream: output (8 32-bit words) * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). 
HChaCha * skips the final addition of the initial state, and outputs only certain words * of the state. It should not be used for streaming directly. */ void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds) { u32 x[16]; memcpy(x, state, 64); chacha_permute(x, nrounds); memcpy(&stream[0], &x[0], 16); memcpy(&stream[4], &x[12], 16); } EXPORT_SYMBOL(hchacha_block_generic);
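/*
 * Illustrative user-space sketch (not part of lib/crypto/chacha.c): the
 * unrolled arithmetic in chacha_permute() above is the classic ChaCha
 * quarter round applied first to the four columns and then to the four
 * diagonals of the 4x4 state.  Standalone demo; rol32() is reimplemented
 * here to stand in for the kernel's <linux/bitops.h> helper.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
{
	x[a] += x[b]; x[d] = rol32(x[d] ^ x[a], 16);
	x[c] += x[d]; x[b] = rol32(x[b] ^ x[c], 12);
	x[a] += x[b]; x[d] = rol32(x[d] ^ x[a], 8);
	x[c] += x[d]; x[b] = rol32(x[b] ^ x[c], 7);
}

/* One "double round": column round then diagonal round, as in chacha_permute(). */
static void double_round(uint32_t x[16])
{
	quarter_round(x, 0, 4,  8, 12);
	quarter_round(x, 1, 5,  9, 13);
	quarter_round(x, 2, 6, 10, 14);
	quarter_round(x, 3, 7, 11, 15);
	quarter_round(x, 0, 5, 10, 15);
	quarter_round(x, 1, 6, 11, 12);
	quarter_round(x, 2, 7,  8, 13);
	quarter_round(x, 3, 4,  9, 14);
}

int main(void)
{
	/* "expand 32-byte k" constants; key, counter and nonce left at zero */
	uint32_t x[16] = { 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 };

	for (int i = 0; i < 10; i++)	/* 20 rounds, like chacha_permute(x, 20) */
		double_round(x);

	printf("x[0] after the 20-round permutation: %08x\n", (unsigned int)x[0]);
	return 0;
}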
1044 1043 402 1046 1046 17297 17303 14881 6528 6522 15840 15828 14687 2771 839 2159 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 // SPDX-License-Identifier: GPL-2.0-or-later #define pr_fmt(fmt) "ref_tracker: " fmt #include <linux/export.h> #include <linux/list_sort.h> #include <linux/ref_tracker.h> #include <linux/slab.h> #include <linux/stacktrace.h> #include <linux/stackdepot.h> #define REF_TRACKER_STACK_ENTRIES 16 #define STACK_BUF_SIZE 1024 struct ref_tracker { struct list_head head; /* anchor into dir->list or dir->quarantine */ bool dead; depot_stack_handle_t alloc_stack_handle; depot_stack_handle_t free_stack_handle; }; struct ref_tracker_dir_stats { int total; int count; struct { depot_stack_handle_t stack_handle; unsigned int count; } stacks[]; }; static struct ref_tracker_dir_stats * ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit) { struct ref_tracker_dir_stats *stats; struct ref_tracker *tracker; stats = kmalloc(struct_size(stats, stacks, limit), GFP_NOWAIT | __GFP_NOWARN); if (!stats) return ERR_PTR(-ENOMEM); stats->total = 0; stats->count = 0; list_for_each_entry(tracker, &dir->list, head) { depot_stack_handle_t stack = tracker->alloc_stack_handle; int i; ++stats->total; for (i = 0; i < stats->count; ++i) if (stats->stacks[i].stack_handle == stack) break; if (i >= limit) continue; if (i >= stats->count) { stats->stacks[i].stack_handle = stack; stats->stacks[i].count = 0; ++stats->count; } ++stats->stacks[i].count; } return stats; } struct ostream { char *buf; int size, used; }; #define pr_ostream(stream, fmt, args...) 
\ ({ \ struct ostream *_s = (stream); \ \ if (!_s->buf) { \ pr_err(fmt, ##args); \ } else { \ int ret, len = _s->size - _s->used; \ ret = snprintf(_s->buf + _s->used, len, pr_fmt(fmt), ##args); \ _s->used += min(ret, len); \ } \ }) static void __ref_tracker_dir_pr_ostream(struct ref_tracker_dir *dir, unsigned int display_limit, struct ostream *s) { struct ref_tracker_dir_stats *stats; unsigned int i = 0, skipped; depot_stack_handle_t stack; char *sbuf; lockdep_assert_held(&dir->lock); if (list_empty(&dir->list)) return; stats = ref_tracker_get_stats(dir, display_limit); if (IS_ERR(stats)) { pr_ostream(s, "%s@%pK: couldn't get stats, error %pe\n", dir->name, dir, stats); return; } sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN); for (i = 0, skipped = stats->total; i < stats->count; ++i) { stack = stats->stacks[i].stack_handle; if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4)) sbuf[0] = 0; pr_ostream(s, "%s@%pK has %d/%d users at\n%s\n", dir->name, dir, stats->stacks[i].count, stats->total, sbuf); skipped -= stats->stacks[i].count; } if (skipped) pr_ostream(s, "%s@%pK skipped reports about %d/%d users.\n", dir->name, dir, skipped, stats->total); kfree(sbuf); kfree(stats); } void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, unsigned int display_limit) { struct ostream os = {}; __ref_tracker_dir_pr_ostream(dir, display_limit, &os); } EXPORT_SYMBOL(ref_tracker_dir_print_locked); void ref_tracker_dir_print(struct ref_tracker_dir *dir, unsigned int display_limit) { unsigned long flags; spin_lock_irqsave(&dir->lock, flags); ref_tracker_dir_print_locked(dir, display_limit); spin_unlock_irqrestore(&dir->lock, flags); } EXPORT_SYMBOL(ref_tracker_dir_print); int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size) { struct ostream os = { .buf = buf, .size = size }; unsigned long flags; spin_lock_irqsave(&dir->lock, flags); __ref_tracker_dir_pr_ostream(dir, 16, &os); spin_unlock_irqrestore(&dir->lock, flags); return os.used; } EXPORT_SYMBOL(ref_tracker_dir_snprint); void ref_tracker_dir_exit(struct ref_tracker_dir *dir) { struct ref_tracker *tracker, *n; unsigned long flags; bool leak = false; dir->dead = true; spin_lock_irqsave(&dir->lock, flags); list_for_each_entry_safe(tracker, n, &dir->quarantine, head) { list_del(&tracker->head); kfree(tracker); dir->quarantine_avail++; } if (!list_empty(&dir->list)) { ref_tracker_dir_print_locked(dir, 16); leak = true; list_for_each_entry_safe(tracker, n, &dir->list, head) { list_del(&tracker->head); kfree(tracker); } } spin_unlock_irqrestore(&dir->lock, flags); WARN_ON_ONCE(leak); WARN_ON_ONCE(refcount_read(&dir->untracked) != 1); WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1); } EXPORT_SYMBOL(ref_tracker_dir_exit); int ref_tracker_alloc(struct ref_tracker_dir *dir, struct ref_tracker **trackerp, gfp_t gfp) { unsigned long entries[REF_TRACKER_STACK_ENTRIES]; struct ref_tracker *tracker; unsigned int nr_entries; gfp_t gfp_mask = gfp | __GFP_NOWARN; unsigned long flags; WARN_ON_ONCE(dir->dead); if (!trackerp) { refcount_inc(&dir->no_tracker); return 0; } if (gfp & __GFP_DIRECT_RECLAIM) gfp_mask |= __GFP_NOFAIL; *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask); if (unlikely(!tracker)) { pr_err_once("memory allocation failure, unreliable refcount tracker.\n"); refcount_inc(&dir->untracked); return -ENOMEM; } nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp); spin_lock_irqsave(&dir->lock, flags); 
list_add(&tracker->head, &dir->list); spin_unlock_irqrestore(&dir->lock, flags); return 0; } EXPORT_SYMBOL_GPL(ref_tracker_alloc); int ref_tracker_free(struct ref_tracker_dir *dir, struct ref_tracker **trackerp) { unsigned long entries[REF_TRACKER_STACK_ENTRIES]; depot_stack_handle_t stack_handle; struct ref_tracker *tracker; unsigned int nr_entries; unsigned long flags; WARN_ON_ONCE(dir->dead); if (!trackerp) { refcount_dec(&dir->no_tracker); return 0; } tracker = *trackerp; if (!tracker) { refcount_dec(&dir->untracked); return -EEXIST; } nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1); stack_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT | __GFP_NOWARN); spin_lock_irqsave(&dir->lock, flags); if (tracker->dead) { pr_err("reference already released.\n"); if (tracker->alloc_stack_handle) { pr_err("allocated in:\n"); stack_depot_print(tracker->alloc_stack_handle); } if (tracker->free_stack_handle) { pr_err("freed in:\n"); stack_depot_print(tracker->free_stack_handle); } spin_unlock_irqrestore(&dir->lock, flags); WARN_ON_ONCE(1); return -EINVAL; } tracker->dead = true; tracker->free_stack_handle = stack_handle; list_move_tail(&tracker->head, &dir->quarantine); if (!dir->quarantine_avail) { tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head); list_del(&tracker->head); } else { dir->quarantine_avail--; tracker = NULL; } spin_unlock_irqrestore(&dir->lock, flags); kfree(tracker); return 0; } EXPORT_SYMBOL_GPL(ref_tracker_free);
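/*
 * Illustrative sketch (not part of ref_tracker.c): how a subsystem object
 * might use the tracking API above to catch reference leaks.  "struct foo"
 * and the foo_*() helpers are hypothetical, and ref_tracker_dir_init() is
 * assumed to be the <linux/ref_tracker.h> initializer taking a quarantine
 * depth and a name, matching the named directories printed above.
 */
struct foo {
	struct ref_tracker_dir	refs;
	/* ... */
};

static void foo_init(struct foo *f)
{
	/* keep up to 16 released trackers around to diagnose double frees */
	ref_tracker_dir_init(&f->refs, 16, "foo");
}

static int foo_get(struct foo *f, struct ref_tracker **trackerp)
{
	/* records the caller's stack so a later leak can be attributed */
	return ref_tracker_alloc(&f->refs, trackerp, GFP_KERNEL);
}

static void foo_put(struct foo *f, struct ref_tracker **trackerp)
{
	ref_tracker_free(&f->refs, trackerp);
}

static void foo_destroy(struct foo *f)
{
	/* prints any outstanding trackers and WARNs if references leaked */
	ref_tracker_dir_exit(&f->refs);
}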
542 82 543 21 4 2 3 547 477 3 4 473 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 // SPDX-License-Identifier: GPL-2.0-or-later /* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/export.h> #include <linux/security.h> #include "internal.h" /** * key_task_permission - Check a key can be used * @key_ref: The key to check. * @cred: The credentials to use. * @need_perm: The permission required. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * * The caller must hold either a ref on cred or must hold the RCU readlock. * * Returns 0 if successful, -EACCES if access is denied based on the * permissions bits or the LSM check. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) { struct key *key; key_perm_t kperm, mask; int ret; switch (need_perm) { default: WARN_ON(1); return -EACCES; case KEY_NEED_UNLINK: case KEY_SYSADMIN_OVERRIDE: case KEY_AUTHTOKEN_OVERRIDE: case KEY_DEFER_PERM_CHECK: goto lsm; case KEY_NEED_VIEW: mask = KEY_OTH_VIEW; break; case KEY_NEED_READ: mask = KEY_OTH_READ; break; case KEY_NEED_WRITE: mask = KEY_OTH_WRITE; break; case KEY_NEED_SEARCH: mask = KEY_OTH_SEARCH; break; case KEY_NEED_LINK: mask = KEY_OTH_LINK; break; case KEY_NEED_SETATTR: mask = KEY_OTH_SETATTR; break; } key = key_ref_to_ptr(key_ref); /* use the second 8-bits of permissions for keys the caller owns */ if (uid_eq(key->uid, cred->fsuid)) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) { if (gid_eq(key->gid, cred->fsgid)) { kperm = key->perm >> 8; goto use_these_perms; } ret = groups_search(cred->group_info, key->gid); if (ret) { kperm = key->perm >> 8; goto use_these_perms; } } /* otherwise use the least-significant 8-bits */ kperm = key->perm; use_these_perms: /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; if ((kperm & mask) != mask) return -EACCES; /* let LSM be the final arbiter */ lsm: return security_key_permission(key_ref, cred, need_perm); } EXPORT_SYMBOL(key_task_permission); /** * key_validate - Validate a key. * @key: The key to be validated. * * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the * key is invalidated, -EKEYREVOKED if the key's type has been removed or if * the key has been revoked or -EKEYEXPIRED if the key has expired. */ int key_validate(const struct key *key) { unsigned long flags = READ_ONCE(key->flags); time64_t expiry = READ_ONCE(key->expiry); if (flags & (1 << KEY_FLAG_INVALIDATED)) return -ENOKEY; /* check it's still accessible */ if (flags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_DEAD))) return -EKEYREVOKED; /* check it hasn't expired */ if (expiry) { if (ktime_get_real_seconds() >= expiry) return -EKEYEXPIRED; } return 0; } EXPORT_SYMBOL(key_validate);
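/*
 * Illustrative user-space sketch (not part of security/keys/permission.c):
 * key->perm packs four 8-bit permission sets into one word, and
 * key_task_permission() above selects one of them -- possessor (bits 31-24)
 * is ORed on top of user (23-16), group (15-8) or other (7-0).  The numeric
 * values below stand in for the KEY_{POS,USR,GRP,OTH}_* masks and are only
 * for demonstration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* possessor: all six rights, user: view|read|write, group: view, other: none */
	uint32_t perm = (0x3fu << 24) | (0x07u << 16) | (0x01u << 8) | 0x00u;

	uint32_t usr = (perm >> 16) & 0xff;	/* caller owns the key */
	uint32_t grp = (perm >> 8) & 0xff;	/* caller only shares the group */
	uint32_t oth = perm & 0xff;		/* unrelated caller */
	uint32_t pos = (perm >> 24) & 0xff;	/* added when the key is possessed */

	printf("usr=%#x grp=%#x oth=%#x pos=%#x\n",
	       (unsigned int)usr, (unsigned int)grp,
	       (unsigned int)oth, (unsigned int)pos);
	return 0;
}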
17 17 14 18 1 17 2 15 1 16 13 15 13 1 10 2 11 17 17 17 17 17 17 17 19 16 19 19 16 16 16 16 16 16 16 16 15 15 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 17 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 /* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. 
It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/uaccess.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> #include <drm/drm_framebuffer.h> #include <drm/drm_managed.h> #include <drm/drm_mode_config.h> #include <drm/drm_print.h> #include <linux/dma-resv.h> #include "drm_crtc_internal.h" #include "drm_internal.h" int drm_modeset_register_all(struct drm_device *dev) { int ret; ret = drm_plane_register_all(dev); if (ret) goto err_plane; ret = drm_crtc_register_all(dev); if (ret) goto err_crtc; ret = drm_encoder_register_all(dev); if (ret) goto err_encoder; ret = drm_connector_register_all(dev); if (ret) goto err_connector; return 0; err_connector: drm_encoder_unregister_all(dev); err_encoder: drm_crtc_unregister_all(dev); err_crtc: drm_plane_unregister_all(dev); err_plane: return ret; } void drm_modeset_unregister_all(struct drm_device *dev) { drm_connector_unregister_all(dev); drm_encoder_unregister_all(dev); drm_crtc_unregister_all(dev); drm_plane_unregister_all(dev); } /** * drm_mode_getresources - get graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a set of configuration description structures and return * them to the user, including CRTC, connector and framebuffer configuration. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
*/ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; int count, ret = 0; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; struct drm_connector_list_iter conn_iter; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; mutex_lock(&file_priv->fbs_lock); count = 0; fb_id = u64_to_user_ptr(card_res->fb_id_ptr); list_for_each_entry(fb, &file_priv->fbs, filp_head) { if (count < card_res->count_fbs && put_user(fb->base.id, fb_id + count)) { mutex_unlock(&file_priv->fbs_lock); return -EFAULT; } count++; } card_res->count_fbs = count; mutex_unlock(&file_priv->fbs_lock); card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; count = 0; crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr); drm_for_each_crtc(crtc, dev) { if (drm_lease_held(file_priv, crtc->base.id)) { if (count < card_res->count_crtcs && put_user(crtc->base.id, crtc_id + count)) return -EFAULT; count++; } } card_res->count_crtcs = count; count = 0; encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr); drm_for_each_encoder(encoder, dev) { if (count < card_res->count_encoders && put_user(encoder->base.id, encoder_id + count)) return -EFAULT; count++; } card_res->count_encoders = count; drm_connector_list_iter_begin(dev, &conn_iter); count = 0; connector_id = u64_to_user_ptr(card_res->connector_id_ptr); drm_for_each_connector_iter(connector, &conn_iter) { /* only expose writeback connectors if userspace understands them */ if (!file_priv->writeback_connectors && (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)) continue; if (drm_lease_held(file_priv, connector->base.id)) { if (count < card_res->count_connectors && put_user(connector->base.id, connector_id + count)) { drm_connector_list_iter_end(&conn_iter); return -EFAULT; } count++; } } card_res->count_connectors = count; drm_connector_list_iter_end(&conn_iter); return ret; } /** * drm_mode_config_reset - call ->reset callbacks * @dev: drm device * * This functions calls all the crtc's, encoder's and connector's ->reset * callback. Drivers can use this in e.g. their driver load or resume code to * reset hardware and software state. 
*/ void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; drm_for_each_plane(plane, dev) if (plane->funcs->reset) plane->funcs->reset(plane); drm_for_each_crtc(crtc, dev) if (crtc->funcs->reset) crtc->funcs->reset(crtc); drm_for_each_encoder(encoder, dev) if (encoder->funcs && encoder->funcs->reset) encoder->funcs->reset(encoder); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->funcs->reset) connector->funcs->reset(connector); drm_connector_list_iter_end(&conn_iter); } EXPORT_SYMBOL(drm_mode_config_reset); /* * Global properties */ static const struct drm_prop_enum_list drm_plane_type_enum_list[] = { { DRM_PLANE_TYPE_OVERLAY, "Overlay" }, { DRM_PLANE_TYPE_PRIMARY, "Primary" }, { DRM_PLANE_TYPE_CURSOR, "Cursor" }, }; static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; int ret; ret = drm_connector_create_standard_properties(dev); if (ret) return ret; prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "type", drm_plane_type_enum_list, ARRAY_SIZE(drm_plane_type_enum_list)); if (!prop) return -ENOMEM; dev->mode_config.plane_type_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_X", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_x = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_Y", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_W", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_H", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_h = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_X", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_x = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_Y", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_W", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_H", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_h = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "FB_ID", DRM_MODE_OBJECT_FB); if (!prop) return -ENOMEM; dev->mode_config.prop_fb_id = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "IN_FENCE_FD", -1, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_in_fence_fd = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "OUT_FENCE_PTR", 0, U64_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_out_fence_ptr = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "CRTC_ID", DRM_MODE_OBJECT_CRTC); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_id = prop; prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS", 0); if (!prop) return -ENOMEM; dev->mode_config.prop_fb_damage_clips = prop; prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, "ACTIVE"); if (!prop) return -ENOMEM; dev->mode_config.prop_active = prop; prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, 
"MODE_ID", 0); if (!prop) return -ENOMEM; dev->mode_config.prop_mode_id = prop; prop = drm_property_create_bool(dev, 0, "VRR_ENABLED"); if (!prop) return -ENOMEM; dev->mode_config.prop_vrr_enabled = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "DEGAMMA_LUT", 0); if (!prop) return -ENOMEM; dev->mode_config.degamma_lut_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "DEGAMMA_LUT_SIZE", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.degamma_lut_size_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CTM", 0); if (!prop) return -ENOMEM; dev->mode_config.ctm_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "GAMMA_LUT", 0); if (!prop) return -ENOMEM; dev->mode_config.gamma_lut_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "GAMMA_LUT_SIZE", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.gamma_lut_size_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, "IN_FORMATS", 0); if (!prop) return -ENOMEM; dev->mode_config.modifiers_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, "SIZE_HINTS", 0); if (!prop) return -ENOMEM; dev->mode_config.size_hints_property = prop; return 0; } static void drm_mode_config_init_release(struct drm_device *dev, void *ptr) { drm_mode_config_cleanup(dev); } /** * drmm_mode_config_init - managed DRM mode_configuration structure * initialization * @dev: DRM device * * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. * * Since this initializes the modeset locks, no locking is possible. Which is no * problem, since this should happen single threaded at init time. It is the * driver's problem to ensure this guarantee. * * Cleanup is automatically handled through registering drm_mode_config_cleanup * with drmm_add_action(). * * Returns: 0 on success, negative error value on failure. 
*/ int drmm_mode_config_init(struct drm_device *dev) { int ret; mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex); mutex_init(&dev->mode_config.idr_mutex); mutex_init(&dev->mode_config.fb_lock); mutex_init(&dev->mode_config.blob_lock); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); INIT_LIST_HEAD(&dev->mode_config.privobj_list); idr_init_base(&dev->mode_config.object_idr, 1); idr_init_base(&dev->mode_config.tile_idr, 1); ida_init(&dev->mode_config.connector_ida); spin_lock_init(&dev->mode_config.connector_list_lock); init_llist_head(&dev->mode_config.connector_free_list); INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn); ret = drm_mode_create_standard_properties(dev); if (ret) { drm_mode_config_cleanup(dev); return ret; } /* Just to be sure */ dev->mode_config.num_fb = 0; dev->mode_config.num_connector = 0; dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; dev->mode_config.num_total_plane = 0; if (IS_ENABLED(CONFIG_LOCKDEP)) { struct drm_modeset_acquire_ctx modeset_ctx; struct ww_acquire_ctx resv_ctx; struct dma_resv resv; int ret; dma_resv_init(&resv); drm_modeset_acquire_init(&modeset_ctx, 0); ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &modeset_ctx); if (ret == -EDEADLK) ret = drm_modeset_backoff(&modeset_ctx); might_fault(); ww_acquire_init(&resv_ctx, &reservation_ww_class); ret = dma_resv_lock(&resv, &resv_ctx); if (ret == -EDEADLK) dma_resv_lock_slow(&resv, &resv_ctx); dma_resv_unlock(&resv); ww_acquire_fini(&resv_ctx); drm_modeset_drop_locks(&modeset_ctx); drm_modeset_acquire_fini(&modeset_ctx); dma_resv_fini(&resv); } return drmm_add_action_or_reset(dev, drm_mode_config_init_release, NULL); } EXPORT_SYMBOL(drmm_mode_config_init); /** * drm_mode_config_cleanup - free up DRM mode_config info * @dev: DRM device * * Free up all the connectors and CRTCs associated with this DRM device, then * free up the framebuffers and associated buffer objects. * * Note that since this /should/ happen single-threaded at driver/device * teardown time, no locking is required. It's the driver's job to ensure that * this guarantee actually holds true. * * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for * drivers to explicitly call this function. */ void drm_mode_config_cleanup(struct drm_device *dev) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_property_blob *blob, *bt; struct drm_plane *plane, *plt; list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, head) { encoder->funcs->destroy(encoder); } drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { /* drm_connector_list_iter holds an full reference to the * current connector itself, which means it is inherently safe * against unreferencing the current connector - but not against * deleting it right away. */ drm_connector_put(connector); } drm_connector_list_iter_end(&conn_iter); /* connector_iter drops references in a work item. 
*/ flush_work(&dev->mode_config.connector_free_work); if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) DRM_ERROR("connector %s leaked!\n", connector->name); drm_connector_list_iter_end(&conn_iter); } list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) { drm_property_destroy(dev, property); } list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, head) { plane->funcs->destroy(plane); } list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { crtc->funcs->destroy(crtc); } list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list, head_global) { drm_property_blob_put(blob); } /* * Single-threaded teardown context, so it's not required to grab the * fb_lock to protect against concurrent fb_list access. Contrary, it * would actually deadlock with the drm_framebuffer_cleanup function. * * Also, if there are any framebuffers left, that's a driver leak now, * so politely WARN about this. */ WARN_ON(!list_empty(&dev->mode_config.fb_list)); list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { struct drm_printer p = drm_dbg_printer(dev, DRM_UT_KMS, "[leaked fb]"); drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); drm_framebuffer_print_info(&p, 1, fb); drm_framebuffer_free(&fb->base.refcount); } ida_destroy(&dev->mode_config.connector_ida); idr_destroy(&dev->mode_config.tile_idr); idr_destroy(&dev->mode_config.object_idr); drm_modeset_lock_fini(&dev->mode_config.connection_mutex); } EXPORT_SYMBOL(drm_mode_config_cleanup); static u32 full_encoder_mask(struct drm_device *dev) { struct drm_encoder *encoder; u32 encoder_mask = 0; drm_for_each_encoder(encoder, dev) encoder_mask |= drm_encoder_mask(encoder); return encoder_mask; } /* * For some reason we want the encoder itself included in * possible_clones. Make life easy for drivers by allowing them * to leave possible_clones unset if no cloning is possible. */ static void fixup_encoder_possible_clones(struct drm_encoder *encoder) { if (encoder->possible_clones == 0) encoder->possible_clones = drm_encoder_mask(encoder); } static void validate_encoder_possible_clones(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; u32 encoder_mask = full_encoder_mask(dev); struct drm_encoder *other; drm_for_each_encoder(other, dev) { WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) != !!(other->possible_clones & drm_encoder_mask(encoder)), "possible_clones mismatch: " "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. 
" "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n", encoder->base.id, encoder->name, drm_encoder_mask(encoder), encoder->possible_clones, other->base.id, other->name, drm_encoder_mask(other), other->possible_clones); } WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 || (encoder->possible_clones & ~encoder_mask) != 0, "Bogus possible_clones: " "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n", encoder->base.id, encoder->name, encoder->possible_clones, encoder_mask); } static u32 full_crtc_mask(struct drm_device *dev) { struct drm_crtc *crtc; u32 crtc_mask = 0; drm_for_each_crtc(crtc, dev) crtc_mask |= drm_crtc_mask(crtc); return crtc_mask; } static void validate_encoder_possible_crtcs(struct drm_encoder *encoder) { u32 crtc_mask = full_crtc_mask(encoder->dev); WARN((encoder->possible_crtcs & crtc_mask) == 0 || (encoder->possible_crtcs & ~crtc_mask) != 0, "Bogus possible_crtcs: " "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n", encoder->base.id, encoder->name, encoder->possible_crtcs, crtc_mask); } void drm_mode_config_validate(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_crtc *crtc; struct drm_plane *plane; u32 primary_with_crtc = 0, cursor_with_crtc = 0; unsigned int num_primary = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; drm_for_each_encoder(encoder, dev) fixup_encoder_possible_clones(encoder); drm_for_each_encoder(encoder, dev) { validate_encoder_possible_clones(encoder); validate_encoder_possible_crtcs(encoder); } drm_for_each_crtc(crtc, dev) { WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_set, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_set2, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_move, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func", crtc->base.id, crtc->name); if (crtc->primary) { WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)), "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", crtc->primary->base.id, crtc->primary->name, crtc->base.id, crtc->name); WARN(primary_with_crtc & drm_plane_mask(crtc->primary), "Primary plane [PLANE:%d:%s] used for multiple CRTCs", crtc->primary->base.id, crtc->primary->name); primary_with_crtc |= drm_plane_mask(crtc->primary); } if (crtc->cursor) { WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)), "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", crtc->cursor->base.id, crtc->cursor->name, crtc->base.id, crtc->name); WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor), "Cursor plane [PLANE:%d:%s] used for multiple CRTCs", crtc->cursor->base.id, crtc->cursor->name); cursor_with_crtc |= drm_plane_mask(crtc->cursor); } } drm_for_each_plane(plane, dev) { if (plane->type == DRM_PLANE_TYPE_PRIMARY) num_primary++; } WARN(num_primary != dev->mode_config.num_crtc, "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs", num_primary, dev->mode_config.num_crtc); }
// SPDX-License-Identifier: GPL-2.0-only /* * vivid-cec.c - A Virtual Video Test Driver, cec emulation * * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/delay.h> #include <media/cec.h> #include "vivid-core.h" #include "vivid-cec.h" #define CEC_START_BIT_US 4500 #define CEC_DATA_BIT_US 2400 #define CEC_MARGIN_US 350 struct xfer_on_bus { struct cec_adapter *adap; u8 status; }; static bool find_dest_adap(struct vivid_dev *dev, struct cec_adapter *adap, u8 dest) { unsigned int i, j; if (dest >= 0xf) return false; if (adap != dev->cec_rx_adap && dev->cec_rx_adap && dev->cec_rx_adap->is_configured && cec_has_log_addr(dev->cec_rx_adap, dest)) return true; for (i = 0, j = 0; i < dev->num_inputs; i++) { unsigned int menu_idx = dev->input_is_connected_to_output[i]; if (dev->input_type[i] != HDMI) continue; j++; if (menu_idx < FIXED_MENU_ITEMS) continue; struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx]; unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx]; if (!dev_tx) continue; unsigned int hdmi_output = dev_tx->output_to_iface_index[output]; if (adap == dev_tx->cec_tx_adap[hdmi_output]) continue; if (!dev_tx->cec_tx_adap[hdmi_output]->is_configured) continue; if (cec_has_log_addr(dev_tx->cec_tx_adap[hdmi_output], dest)) return true; } return false; } static bool xfer_ready(struct vivid_dev *dev) { unsigned int i; bool ready = false; spin_lock(&dev->cec_xfers_slock); for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) { if (dev->xfers[i].sft && dev->xfers[i].sft <= dev->cec_sft) { ready = true; break; } } spin_unlock(&dev->cec_xfers_slock); return ready; } /* * If an adapter tries to send successive messages, it must wait for the * longest signal-free time between its transmissions. But, if another * adapter sends a message in the interim, then the wait can be reduced * because the messages are no longer successive. Make these adjustments * if necessary. 
Should be called holding cec_xfers_slock. */ static void adjust_sfts(struct vivid_dev *dev) { unsigned int i; u8 initiator; for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) { if (dev->xfers[i].sft <= CEC_SIGNAL_FREE_TIME_RETRY) continue; initiator = dev->xfers[i].msg[0] >> 4; if (initiator == dev->last_initiator) dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER; else dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; } } /* * The main emulation of the bus on which CEC adapters attempt to send * messages to each other. The bus keeps track of how long it has been * signal-free and accepts a pending transmission only if the state of * the bus matches the transmission's signal-free requirements. It calls * cec_transmit_attempt_done() for all transmits that enter the bus and * cec_received_msg() for successful transmits. */ int vivid_cec_bus_thread(void *_dev) { u32 last_sft; unsigned int i, j; unsigned int dest; ktime_t start, end; s64 delta_us, retry_us; struct vivid_dev *dev = _dev; dev->cec_sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER; for (;;) { bool first = true; int wait_xfer_us = 0; bool valid_dest = false; int wait_arb_lost_us = 0; unsigned int first_idx = 0; unsigned int first_status = 0; struct cec_msg first_msg = {}; struct xfer_on_bus xfers_on_bus[MAX_OUTPUTS] = {}; wait_event_interruptible(dev->kthread_waitq_cec, xfer_ready(dev) || kthread_should_stop()); if (kthread_should_stop()) break; last_sft = dev->cec_sft; dev->cec_sft = 0; /* * Move the messages that are ready onto the bus. The adapter with * the most leading zeros will win control of the bus and any other * adapters will lose arbitration. */ spin_lock(&dev->cec_xfers_slock); for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) { if (!dev->xfers[i].sft || dev->xfers[i].sft > last_sft) continue; if (first) { first = false; first_idx = i; xfers_on_bus[first_idx].adap = dev->xfers[i].adap; memcpy(first_msg.msg, dev->xfers[i].msg, dev->xfers[i].len); first_msg.len = dev->xfers[i].len; } else { xfers_on_bus[i].adap = dev->xfers[i].adap; xfers_on_bus[i].status = CEC_TX_STATUS_ARB_LOST; /* * For simplicity wait for all 4 bits of the initiator's * address even though HDMI specification uses bit-level * precision. */ wait_arb_lost_us = 4 * CEC_DATA_BIT_US + CEC_START_BIT_US; } dev->xfers[i].sft = 0; } dev->last_initiator = cec_msg_initiator(&first_msg); adjust_sfts(dev); spin_unlock(&dev->cec_xfers_slock); dest = cec_msg_destination(&first_msg); valid_dest = cec_msg_is_broadcast(&first_msg); if (!valid_dest) valid_dest = find_dest_adap(dev, xfers_on_bus[first_idx].adap, dest); if (valid_dest) { first_status = CEC_TX_STATUS_OK; /* * Message length is in bytes, but each byte is transmitted in * a block of 10 bits. */ wait_xfer_us = first_msg.len * 10 * CEC_DATA_BIT_US; } else { first_status = CEC_TX_STATUS_NACK; /* * A message that is not acknowledged stops transmitting after * the header block of 10 bits. */ wait_xfer_us = 10 * CEC_DATA_BIT_US; } wait_xfer_us += CEC_START_BIT_US; xfers_on_bus[first_idx].status = first_status; /* Sleep as if sending messages on a real hardware bus. 
*/ start = ktime_get(); if (wait_arb_lost_us) { usleep_range(wait_arb_lost_us - CEC_MARGIN_US, wait_arb_lost_us); for (i = 0; i < ARRAY_SIZE(xfers_on_bus); i++) { if (xfers_on_bus[i].status != CEC_TX_STATUS_ARB_LOST) continue; cec_transmit_attempt_done(xfers_on_bus[i].adap, CEC_TX_STATUS_ARB_LOST); } if (kthread_should_stop()) break; } wait_xfer_us -= wait_arb_lost_us; usleep_range(wait_xfer_us - CEC_MARGIN_US, wait_xfer_us); cec_transmit_attempt_done(xfers_on_bus[first_idx].adap, first_status); if (kthread_should_stop()) break; if (first_status == CEC_TX_STATUS_OK) { if (xfers_on_bus[first_idx].adap != dev->cec_rx_adap) cec_received_msg(dev->cec_rx_adap, &first_msg); for (i = 0, j = 0; i < dev->num_inputs; i++) { unsigned int menu_idx = dev->input_is_connected_to_output[i]; if (dev->input_type[i] != HDMI) continue; j++; if (menu_idx < FIXED_MENU_ITEMS) continue; struct vivid_dev *dev_tx = vivid_ctrl_hdmi_to_output_instance[menu_idx]; unsigned int output = vivid_ctrl_hdmi_to_output_index[menu_idx]; if (!dev_tx) continue; unsigned int hdmi_output = dev_tx->output_to_iface_index[output]; if (xfers_on_bus[first_idx].adap != dev_tx->cec_tx_adap[hdmi_output]) cec_received_msg(dev_tx->cec_tx_adap[hdmi_output], &first_msg); } } end = ktime_get(); /* * If the emulated transfer took more or less time than it should * have, then compensate by adjusting the wait time needed for the * bus to be signal-free for 3 bit periods (the retry time). */ delta_us = div_s64(end - start, 1000); delta_us -= wait_xfer_us + wait_arb_lost_us; retry_us = CEC_SIGNAL_FREE_TIME_RETRY * CEC_DATA_BIT_US - delta_us; if (retry_us > CEC_MARGIN_US) usleep_range(retry_us - CEC_MARGIN_US, retry_us); dev->cec_sft = CEC_SIGNAL_FREE_TIME_RETRY; /* * If there are no messages that need to be retried, check if any * adapters that did not just transmit a message are ready to * transmit. If none of these adapters are ready, then increase * the signal-free time so that the bus is available to all * adapters and go back to waiting for a transmission. 
*/ while (dev->cec_sft >= CEC_SIGNAL_FREE_TIME_RETRY && dev->cec_sft < CEC_SIGNAL_FREE_TIME_NEXT_XFER && !xfer_ready(dev) && !kthread_should_stop()) { usleep_range(2 * CEC_DATA_BIT_US - CEC_MARGIN_US, 2 * CEC_DATA_BIT_US); dev->cec_sft += 2; } } return 0; } static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable) { adap->cec_pin_is_high = true; return 0; } static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr) { return 0; } static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, u32 signal_free_time, struct cec_msg *msg) { struct vivid_dev *dev = cec_get_drvdata(adap); struct vivid_dev *dev_rx = dev; u8 idx = cec_msg_initiator(msg); u8 output = 0; if (dev->cec_rx_adap != adap) { int i; for (i = 0; i < dev->num_hdmi_outputs; i++) if (dev->cec_tx_adap[i] == adap) break; if (i == dev->num_hdmi_outputs) return -ENONET; output = dev->hdmi_index_to_output_index[i]; dev_rx = dev->output_to_input_instance[output]; if (!dev_rx) return -ENONET; } spin_lock(&dev_rx->cec_xfers_slock); dev_rx->xfers[idx].adap = adap; memcpy(dev_rx->xfers[idx].msg, msg->msg, CEC_MAX_MSG_SIZE); dev_rx->xfers[idx].len = msg->len; dev_rx->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_RETRY; if (signal_free_time > CEC_SIGNAL_FREE_TIME_RETRY) { if (idx == dev_rx->last_initiator) dev_rx->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER; else dev_rx->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR; } spin_unlock(&dev_rx->cec_xfers_slock); wake_up_interruptible(&dev_rx->kthread_waitq_cec); return 0; } static int vivid_received(struct cec_adapter *adap, struct cec_msg *msg) { struct vivid_dev *dev = cec_get_drvdata(adap); struct cec_msg reply; u8 dest = cec_msg_destination(msg); if (cec_msg_is_broadcast(msg)) dest = adap->log_addrs.log_addr[0]; cec_msg_init(&reply, dest, cec_msg_initiator(msg)); switch (cec_msg_opcode(msg)) { case CEC_MSG_SET_OSD_STRING: { u8 disp_ctl; char osd[14]; if (!cec_is_sink(adap)) return -ENOMSG; cec_ops_set_osd_string(msg, &disp_ctl, osd); switch (disp_ctl) { case CEC_OP_DISP_CTL_DEFAULT: strscpy(dev->osd, osd, sizeof(dev->osd)); dev->osd_jiffies = jiffies; break; case CEC_OP_DISP_CTL_UNTIL_CLEARED: strscpy(dev->osd, osd, sizeof(dev->osd)); dev->osd_jiffies = 0; break; case CEC_OP_DISP_CTL_CLEAR: dev->osd[0] = 0; dev->osd_jiffies = 0; break; default: cec_msg_feature_abort(&reply, cec_msg_opcode(msg), CEC_OP_ABORT_INVALID_OP); cec_transmit_msg(adap, &reply, false); break; } break; } case CEC_MSG_VENDOR_COMMAND_WITH_ID: { u32 vendor_id; u8 size; const u8 *vendor_cmd; /* * If we receive <Vendor Command With ID> with our vendor ID * and with a payload of size 1, and the payload value is odd, * then we reply with the same message, but with the payload * byte incremented by 1. * * If the size is 1 and the payload value is even, then we * ignore the message. * * The reason we reply to odd instead of even payload values * is that it allows for testing of the corner case where the * reply value is 0 (0xff + 1 % 256). * * For other sizes we Feature Abort. * * This is added for the specific purpose of testing the * CEC_MSG_FL_REPLY_VENDOR_ID flag using vivid. 
*/ cec_ops_vendor_command_with_id(msg, &vendor_id, &size, &vendor_cmd); if (vendor_id != adap->log_addrs.vendor_id) break; if (size == 1) { // Ignore even op values if (!(vendor_cmd[0] & 1)) break; reply.len = msg->len; memcpy(reply.msg + 1, msg->msg + 1, msg->len - 1); reply.msg[msg->len - 1]++; } else { cec_msg_feature_abort(&reply, cec_msg_opcode(msg), CEC_OP_ABORT_INVALID_OP); } cec_transmit_msg(adap, &reply, false); break; } default: return -ENOMSG; } return 0; } static const struct cec_adap_ops vivid_cec_adap_ops = { .adap_enable = vivid_cec_adap_enable, .adap_log_addr = vivid_cec_adap_log_addr, .adap_transmit = vivid_cec_adap_transmit, .received = vivid_received, }; struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev, unsigned int idx, bool is_source) { u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_MONITOR_ALL | CEC_CAP_MONITOR_PIN; char name[32]; snprintf(name, sizeof(name), "vivid-%03d-vid-%s%d", dev->inst, is_source ? "out" : "cap", idx); return cec_allocate_adapter(&vivid_cec_adap_ops, dev, name, caps, CEC_MAX_LOG_ADDRS); }
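As a worked example of the timing model used by vivid_cec_bus_thread() above, with the values from the defines at the top of this file:

/*
 * Worked numbers for wait_xfer_us (illustrative, not new driver code):
 *
 *   acknowledged 4-byte message: CEC_START_BIT_US + 4 * 10 * CEC_DATA_BIT_US
 *                              = 4500 + 96000 = 100500 us (~100.5 ms)
 *   NACKed message (header only): CEC_START_BIT_US + 10 * CEC_DATA_BIT_US
 *                              = 4500 + 24000 = 28500 us (~28.5 ms)
 *   arbitration-lost wait: CEC_START_BIT_US + 4 * CEC_DATA_BIT_US
 *                              = 4500 + 9600 = 14100 us
 *
 * usleep_range() is given a window of CEC_MARGIN_US (350 us) below each of
 * these targets, and any remaining drift is folded into the signal-free
 * retry wait afterwards.
 */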
// SPDX-License-Identifier: GPL-2.0-or-later /* * Dummy soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> */ #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/hrtimer.h> #include <linux/math64.h> #include <linux/module.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include <sound/info.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Dummy soundcard (/dev/null)"); MODULE_LICENSE("GPL"); #define MAX_PCM_DEVICES 4 #define MAX_PCM_SUBSTREAMS 128 #define MAX_MIDI_DEVICES 2 /* defaults */ #define MAX_BUFFER_SIZE (64*1024) #define MIN_PERIOD_SIZE 64 #define MAX_PERIOD_SIZE MAX_BUFFER_SIZE #define USE_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE) #define USE_RATE SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000 #define USE_RATE_MIN 5500 #define USE_RATE_MAX 48000 #define USE_CHANNELS_MIN 1 #define USE_CHANNELS_MAX 2 #define USE_PERIODS_MIN 1 #define USE_PERIODS_MAX 1024 #define USE_MIXER_VOLUME_LEVEL_MIN -50 #define USE_MIXER_VOLUME_LEVEL_MAX 100 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0}; static char *model[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = NULL}; static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8}; //static int midi_devs[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 2}; static int mixer_volume_level_min = USE_MIXER_VOLUME_LEVEL_MIN; static int mixer_volume_level_max = USE_MIXER_VOLUME_LEVEL_MAX; #ifdef CONFIG_HIGH_RES_TIMERS static bool hrtimer = 1; #endif static bool fake_buffer = 1; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for dummy soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for dummy soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable this dummy soundcard."); module_param_array(model, charp, NULL, 0444); MODULE_PARM_DESC(model, "Soundcard model."); module_param_array(pcm_devs, int, NULL, 0444); MODULE_PARM_DESC(pcm_devs, "PCM devices # (0-4) for dummy driver."); module_param_array(pcm_substreams, int, NULL, 0444); MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver."); //module_param_array(midi_devs, int, NULL, 0444); //MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver."); module_param(mixer_volume_level_min, int, 0444); MODULE_PARM_DESC(mixer_volume_level_min, "Minimum mixer volume level for dummy driver. Default: -50"); module_param(mixer_volume_level_max, int, 0444); MODULE_PARM_DESC(mixer_volume_level_max, "Maximum mixer volume level for dummy driver. Default: 100"); module_param(fake_buffer, bool, 0444); MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); #ifdef CONFIG_HIGH_RES_TIMERS module_param(hrtimer, bool, 0644); MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); #endif static struct platform_device *devices[SNDRV_CARDS]; #define MIXER_ADDR_MASTER 0 #define MIXER_ADDR_LINE 1 #define MIXER_ADDR_MIC 2 #define MIXER_ADDR_SYNTH 3 #define MIXER_ADDR_CD 4 #define MIXER_ADDR_LAST 4 struct dummy_timer_ops { int (*create)(struct snd_pcm_substream *); void (*free)(struct snd_pcm_substream *); int (*prepare)(struct snd_pcm_substream *); int (*start)(struct snd_pcm_substream *); int (*stop)(struct snd_pcm_substream *); snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); }; #define get_dummy_ops(substream) \ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data) struct dummy_model { const char *name; int (*playback_constraints)(struct snd_pcm_runtime *runtime); int (*capture_constraints)(struct snd_pcm_runtime *runtime); u64 formats; size_t buffer_bytes_max; size_t period_bytes_min; size_t period_bytes_max; unsigned int periods_min; unsigned int periods_max; unsigned int rates; unsigned int rate_min; unsigned int rate_max; unsigned int channels_min; unsigned int channels_max; }; struct snd_dummy { struct snd_card *card; const struct dummy_model *model; struct snd_pcm *pcm; struct snd_pcm_hardware pcm_hw; spinlock_t mixer_lock; int mixer_volume[MIXER_ADDR_LAST+1][2]; int capture_source[MIXER_ADDR_LAST+1][2]; int iobox; struct snd_kcontrol *cd_volume_ctl; struct snd_kcontrol *cd_switch_ctl; }; /* * card models */ static int emu10k1_playback_constraints(struct snd_pcm_runtime *runtime) { int err; err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX); if (err < 0) return err; return 0; } static const struct dummy_model model_emu10k1 = { .name = "emu10k1", .playback_constraints = emu10k1_playback_constraints, .buffer_bytes_max = 128 * 1024, }; static const struct dummy_model model_rme9652 = { .name = "rme9652", .buffer_bytes_max = 26 * 64 * 1024, .formats = SNDRV_PCM_FMTBIT_S32_LE, .channels_min 
= 26, .channels_max = 26, .periods_min = 2, .periods_max = 2, }; static const struct dummy_model model_ice1712 = { .name = "ice1712", .buffer_bytes_max = 256 * 1024, .formats = SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 10, .channels_max = 10, .periods_min = 1, .periods_max = 1024, }; static const struct dummy_model model_uda1341 = { .name = "uda1341", .buffer_bytes_max = 16380, .formats = SNDRV_PCM_FMTBIT_S16_LE, .channels_min = 2, .channels_max = 2, .periods_min = 2, .periods_max = 255, }; static const struct dummy_model model_ac97 = { .name = "ac97", .formats = SNDRV_PCM_FMTBIT_S16_LE, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, }; static const struct dummy_model model_ca0106 = { .name = "ca0106", .formats = SNDRV_PCM_FMTBIT_S16_LE, .buffer_bytes_max = ((65536-64)*8), .period_bytes_max = (65536-64), .periods_min = 2, .periods_max = 8, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_96000|SNDRV_PCM_RATE_192000, .rate_min = 48000, .rate_max = 192000, }; static const struct dummy_model *dummy_models[] = { &model_emu10k1, &model_rme9652, &model_ice1712, &model_uda1341, &model_ac97, &model_ca0106, NULL }; /* * system timer interface */ struct dummy_systimer_pcm { /* ops must be the first item */ const struct dummy_timer_ops *timer_ops; spinlock_t lock; struct timer_list timer; unsigned long base_time; unsigned int frac_pos; /* fractional sample position (based HZ) */ unsigned int frac_period_rest; unsigned int frac_buffer_size; /* buffer_size * HZ */ unsigned int frac_period_size; /* period_size * HZ */ unsigned int rate; int elapsed; struct snd_pcm_substream *substream; }; static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm) { mod_timer(&dpcm->timer, jiffies + DIV_ROUND_UP(dpcm->frac_period_rest, dpcm->rate)); } static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm) { unsigned long delta; delta = jiffies - dpcm->base_time; if (!delta) return; dpcm->base_time += delta; delta *= dpcm->rate; dpcm->frac_pos += delta; while (dpcm->frac_pos >= dpcm->frac_buffer_size) dpcm->frac_pos -= dpcm->frac_buffer_size; while (dpcm->frac_period_rest <= delta) { dpcm->elapsed++; dpcm->frac_period_rest += dpcm->frac_period_size; } dpcm->frac_period_rest -= delta; } static int dummy_systimer_start(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; spin_lock(&dpcm->lock); dpcm->base_time = jiffies; dummy_systimer_rearm(dpcm); spin_unlock(&dpcm->lock); return 0; } static int dummy_systimer_stop(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; spin_lock(&dpcm->lock); del_timer(&dpcm->timer); spin_unlock(&dpcm->lock); return 0; } static int dummy_systimer_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_systimer_pcm *dpcm = runtime->private_data; dpcm->frac_pos = 0; dpcm->rate = runtime->rate; dpcm->frac_buffer_size = runtime->buffer_size * HZ; dpcm->frac_period_size = runtime->period_size * HZ; dpcm->frac_period_rest = dpcm->frac_period_size; dpcm->elapsed = 0; return 0; } static void dummy_systimer_callback(struct timer_list *t) { struct dummy_systimer_pcm *dpcm = from_timer(dpcm, t, timer); unsigned long flags; int elapsed = 0; spin_lock_irqsave(&dpcm->lock, flags); dummy_systimer_update(dpcm); dummy_systimer_rearm(dpcm); elapsed = dpcm->elapsed; dpcm->elapsed = 0; spin_unlock_irqrestore(&dpcm->lock, flags); if 
(elapsed) snd_pcm_period_elapsed(dpcm->substream); } static snd_pcm_uframes_t dummy_systimer_pointer(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; snd_pcm_uframes_t pos; spin_lock(&dpcm->lock); dummy_systimer_update(dpcm); pos = dpcm->frac_pos / HZ; spin_unlock(&dpcm->lock); return pos; } static int dummy_systimer_create(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm; dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL); if (!dpcm) return -ENOMEM; substream->runtime->private_data = dpcm; timer_setup(&dpcm->timer, dummy_systimer_callback, 0); spin_lock_init(&dpcm->lock); dpcm->substream = substream; return 0; } static void dummy_systimer_free(struct snd_pcm_substream *substream) { kfree(substream->runtime->private_data); } static const struct dummy_timer_ops dummy_systimer_ops = { .create = dummy_systimer_create, .free = dummy_systimer_free, .prepare = dummy_systimer_prepare, .start = dummy_systimer_start, .stop = dummy_systimer_stop, .pointer = dummy_systimer_pointer, }; #ifdef CONFIG_HIGH_RES_TIMERS /* * hrtimer interface */ struct dummy_hrtimer_pcm { /* ops must be the first item */ const struct dummy_timer_ops *timer_ops; ktime_t base_time; ktime_t period_time; atomic_t running; struct hrtimer timer; struct snd_pcm_substream *substream; }; static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer) { struct dummy_hrtimer_pcm *dpcm; dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer); if (!atomic_read(&dpcm->running)) return HRTIMER_NORESTART; /* * In cases of XRUN and draining, this calls .trigger to stop PCM * substream. */ snd_pcm_period_elapsed(dpcm->substream); if (!atomic_read(&dpcm->running)) return HRTIMER_NORESTART; hrtimer_forward_now(timer, dpcm->period_time); return HRTIMER_RESTART; } static int dummy_hrtimer_start(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer); hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL_SOFT); atomic_set(&dpcm->running, 1); return 0; } static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; atomic_set(&dpcm->running, 0); if (!hrtimer_callback_running(&dpcm->timer)) hrtimer_cancel(&dpcm->timer); return 0; } static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) { hrtimer_cancel(&dpcm->timer); } static snd_pcm_uframes_t dummy_hrtimer_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_hrtimer_pcm *dpcm = runtime->private_data; u64 delta; u32 pos; delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer), dpcm->base_time); delta = div_u64(delta * runtime->rate + 999999, 1000000); div_u64_rem(delta, runtime->buffer_size, &pos); return pos; } static int dummy_hrtimer_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_hrtimer_pcm *dpcm = runtime->private_data; unsigned int period, rate; long sec; unsigned long nsecs; dummy_hrtimer_sync(dpcm); period = runtime->period_size; rate = runtime->rate; sec = period / rate; period %= rate; nsecs = div_u64((u64)period * 1000000000UL + rate - 1, rate); dpcm->period_time = ktime_set(sec, nsecs); return 0; } static int dummy_hrtimer_create(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm; dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL); if (!dpcm) return -ENOMEM; 
substream->runtime->private_data = dpcm; hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); dpcm->timer.function = dummy_hrtimer_callback; dpcm->substream = substream; atomic_set(&dpcm->running, 0); return 0; } static void dummy_hrtimer_free(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; dummy_hrtimer_sync(dpcm); kfree(dpcm); } static const struct dummy_timer_ops dummy_hrtimer_ops = { .create = dummy_hrtimer_create, .free = dummy_hrtimer_free, .prepare = dummy_hrtimer_prepare, .start = dummy_hrtimer_start, .stop = dummy_hrtimer_stop, .pointer = dummy_hrtimer_pointer, }; #endif /* CONFIG_HIGH_RES_TIMERS */ /* * PCM interface */ static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: return get_dummy_ops(substream)->start(substream); case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: return get_dummy_ops(substream)->stop(substream); } return -EINVAL; } static int dummy_pcm_prepare(struct snd_pcm_substream *substream) { return get_dummy_ops(substream)->prepare(substream); } static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream) { return get_dummy_ops(substream)->pointer(substream); } static const struct snd_pcm_hardware dummy_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID), .formats = USE_FORMATS, .rates = USE_RATE, .rate_min = USE_RATE_MIN, .rate_max = USE_RATE_MAX, .channels_min = USE_CHANNELS_MIN, .channels_max = USE_CHANNELS_MAX, .buffer_bytes_max = MAX_BUFFER_SIZE, .period_bytes_min = MIN_PERIOD_SIZE, .period_bytes_max = MAX_PERIOD_SIZE, .periods_min = USE_PERIODS_MIN, .periods_max = USE_PERIODS_MAX, .fifo_size = 0, }; static int dummy_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { if (fake_buffer) { /* runtime->dma_bytes has to be set manually to allow mmap */ substream->runtime->dma_bytes = params_buffer_bytes(hw_params); return 0; } return 0; } static int dummy_pcm_open(struct snd_pcm_substream *substream) { struct snd_dummy *dummy = snd_pcm_substream_chip(substream); const struct dummy_model *model = dummy->model; struct snd_pcm_runtime *runtime = substream->runtime; const struct dummy_timer_ops *ops; int err; ops = &dummy_systimer_ops; #ifdef CONFIG_HIGH_RES_TIMERS if (hrtimer) ops = &dummy_hrtimer_ops; #endif err = ops->create(substream); if (err < 0) return err; get_dummy_ops(substream) = ops; runtime->hw = dummy->pcm_hw; if (substream->pcm->device & 1) { runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED; runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; } if (substream->pcm->device & 2) runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID); if (model == NULL) return 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (model->playback_constraints) err = model->playback_constraints(substream->runtime); } else { if (model->capture_constraints) err = model->capture_constraints(substream->runtime); } if (err < 0) { get_dummy_ops(substream)->free(substream); return err; } return 0; } static int dummy_pcm_close(struct snd_pcm_substream *substream) { get_dummy_ops(substream)->free(substream); return 0; } /* * dummy buffer handling */ static void *dummy_page[2]; static void free_fake_buffer(void) { if (fake_buffer) { int i; for (i = 0; i < 2; i++) if (dummy_page[i]) { free_page((unsigned long)dummy_page[i]); dummy_page[i] = NULL; } } } 
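To make the HZ-scaled bookkeeping in the dummy_systimer_* helpers above concrete, here is a small worked example (the numbers, including HZ == 1000, are illustrative assumptions):

/*
 * Illustrative numbers only, assuming HZ == 1000, rate == 48000 and
 * period_size == 1024 frames:
 *
 *   frac_period_size = 1024 * HZ = 1024000
 *   timer delay      = DIV_ROUND_UP(1024000, 48000) = 22 jiffies
 *
 * dummy_systimer_update() adds `rate` to frac_pos for every elapsed jiffy,
 * so the reported pointer (frac_pos / HZ) advances by 48 frames per jiffy
 * and wraps at frac_buffer_size = buffer_size * HZ.
 */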
static int alloc_fake_buffer(void) { int i; if (!fake_buffer) return 0; for (i = 0; i < 2; i++) { dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL); if (!dummy_page[i]) { free_fake_buffer(); return -ENOMEM; } } return 0; } static int dummy_pcm_copy(struct snd_pcm_substream *substream, int channel, unsigned long pos, struct iov_iter *iter, unsigned long bytes) { return 0; /* do nothing */ } static int dummy_pcm_silence(struct snd_pcm_substream *substream, int channel, unsigned long pos, unsigned long bytes) { return 0; /* do nothing */ } static struct page *dummy_pcm_page(struct snd_pcm_substream *substream, unsigned long offset) { return virt_to_page(dummy_page[substream->stream]); /* the same page */ } static const struct snd_pcm_ops dummy_pcm_ops = { .open = dummy_pcm_open, .close = dummy_pcm_close, .hw_params = dummy_pcm_hw_params, .prepare = dummy_pcm_prepare, .trigger = dummy_pcm_trigger, .pointer = dummy_pcm_pointer, }; static const struct snd_pcm_ops dummy_pcm_ops_no_buf = { .open = dummy_pcm_open, .close = dummy_pcm_close, .hw_params = dummy_pcm_hw_params, .prepare = dummy_pcm_prepare, .trigger = dummy_pcm_trigger, .pointer = dummy_pcm_pointer, .copy = dummy_pcm_copy, .fill_silence = dummy_pcm_silence, .page = dummy_pcm_page, }; static int snd_card_dummy_pcm(struct snd_dummy *dummy, int device, int substreams) { struct snd_pcm *pcm; const struct snd_pcm_ops *ops; int err; err = snd_pcm_new(dummy->card, "Dummy PCM", device, substreams, substreams, &pcm); if (err < 0) return err; dummy->pcm = pcm; if (fake_buffer) ops = &dummy_pcm_ops_no_buf; else ops = &dummy_pcm_ops; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops); pcm->private_data = dummy; pcm->info_flags = 0; strcpy(pcm->name, "Dummy PCM"); if (!fake_buffer) { snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, NULL, 0, 64*1024); } return 0; } /* * mixer interface */ #define DUMMY_VOLUME(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .name = xname, .index = xindex, \ .info = snd_dummy_volume_info, \ .get = snd_dummy_volume_get, .put = snd_dummy_volume_put, \ .private_value = addr, \ .tlv = { .p = db_scale_dummy } } static int snd_dummy_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = mixer_volume_level_min; uinfo->value.integer.max = mixer_volume_level_max; return 0; } static int snd_dummy_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int addr = kcontrol->private_value; spin_lock_irq(&dummy->mixer_lock); ucontrol->value.integer.value[0] = dummy->mixer_volume[addr][0]; ucontrol->value.integer.value[1] = dummy->mixer_volume[addr][1]; spin_unlock_irq(&dummy->mixer_lock); return 0; } static int snd_dummy_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int change, addr = kcontrol->private_value; int left, right; left = ucontrol->value.integer.value[0]; if (left < mixer_volume_level_min) left = mixer_volume_level_min; if (left > mixer_volume_level_max) left = mixer_volume_level_max; right = ucontrol->value.integer.value[1]; if (right < mixer_volume_level_min) right = mixer_volume_level_min; if (right > mixer_volume_level_max) right = mixer_volume_level_max; 
spin_lock_irq(&dummy->mixer_lock); change = dummy->mixer_volume[addr][0] != left || dummy->mixer_volume[addr][1] != right; dummy->mixer_volume[addr][0] = left; dummy->mixer_volume[addr][1] = right; spin_unlock_irq(&dummy->mixer_lock); return change; } static const DECLARE_TLV_DB_SCALE(db_scale_dummy, -4500, 30, 0); #define DUMMY_CAPSRC(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_dummy_capsrc_info, \ .get = snd_dummy_capsrc_get, .put = snd_dummy_capsrc_put, \ .private_value = addr } #define snd_dummy_capsrc_info snd_ctl_boolean_stereo_info static int snd_dummy_capsrc_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int addr = kcontrol->private_value; spin_lock_irq(&dummy->mixer_lock); ucontrol->value.integer.value[0] = dummy->capture_source[addr][0]; ucontrol->value.integer.value[1] = dummy->capture_source[addr][1]; spin_unlock_irq(&dummy->mixer_lock); return 0; } static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int change, addr = kcontrol->private_value; int left, right; left = ucontrol->value.integer.value[0] & 1; right = ucontrol->value.integer.value[1] & 1; spin_lock_irq(&dummy->mixer_lock); change = dummy->capture_source[addr][0] != left && dummy->capture_source[addr][1] != right; dummy->capture_source[addr][0] = left; dummy->capture_source[addr][1] = right; spin_unlock_irq(&dummy->mixer_lock); return change; } static int snd_dummy_iobox_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { static const char *const names[] = { "None", "CD Player" }; return snd_ctl_enum_info(info, 1, 2, names); } static int snd_dummy_iobox_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); value->value.enumerated.item[0] = dummy->iobox; return 0; } static int snd_dummy_iobox_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int changed; if (value->value.enumerated.item[0] > 1) return -EINVAL; changed = value->value.enumerated.item[0] != dummy->iobox; if (changed) { dummy->iobox = value->value.enumerated.item[0]; if (dummy->iobox) { dummy->cd_volume_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; dummy->cd_switch_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; } else { dummy->cd_volume_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; dummy->cd_switch_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } snd_ctl_notify(dummy->card, SNDRV_CTL_EVENT_MASK_INFO, &dummy->cd_volume_ctl->id); snd_ctl_notify(dummy->card, SNDRV_CTL_EVENT_MASK_INFO, &dummy->cd_switch_ctl->id); } return changed; } static const struct snd_kcontrol_new snd_dummy_controls[] = { DUMMY_VOLUME("Master Volume", 0, MIXER_ADDR_MASTER), DUMMY_CAPSRC("Master Capture Switch", 0, MIXER_ADDR_MASTER), DUMMY_VOLUME("Synth Volume", 0, MIXER_ADDR_SYNTH), DUMMY_CAPSRC("Synth Capture Switch", 0, MIXER_ADDR_SYNTH), DUMMY_VOLUME("Line Volume", 0, MIXER_ADDR_LINE), DUMMY_CAPSRC("Line Capture Switch", 0, MIXER_ADDR_LINE), DUMMY_VOLUME("Mic Volume", 0, MIXER_ADDR_MIC), DUMMY_CAPSRC("Mic Capture Switch", 0, MIXER_ADDR_MIC), DUMMY_VOLUME("CD Volume", 0, MIXER_ADDR_CD), DUMMY_CAPSRC("CD Capture Switch", 0, MIXER_ADDR_CD), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "External I/O Box", .info = snd_dummy_iobox_info, .get = 
snd_dummy_iobox_get, .put = snd_dummy_iobox_put, }, }; static int snd_card_dummy_new_mixer(struct snd_dummy *dummy) { struct snd_card *card = dummy->card; struct snd_kcontrol *kcontrol; unsigned int idx; int err; spin_lock_init(&dummy->mixer_lock); strcpy(card->mixername, "Dummy Mixer"); dummy->iobox = 1; for (idx = 0; idx < ARRAY_SIZE(snd_dummy_controls); idx++) { kcontrol = snd_ctl_new1(&snd_dummy_controls[idx], dummy); err = snd_ctl_add(card, kcontrol); if (err < 0) return err; if (!strcmp(kcontrol->id.name, "CD Volume")) dummy->cd_volume_ctl = kcontrol; else if (!strcmp(kcontrol->id.name, "CD Capture Switch")) dummy->cd_switch_ctl = kcontrol; } return 0; } #if defined(CONFIG_SND_DEBUG) && defined(CONFIG_SND_PROC_FS) /* * proc interface */ static void print_formats(struct snd_dummy *dummy, struct snd_info_buffer *buffer) { snd_pcm_format_t i; pcm_for_each_format(i) { if (dummy->pcm_hw.formats & pcm_format_to_bits(i)) snd_iprintf(buffer, " %s", snd_pcm_format_name(i)); } } static void print_rates(struct snd_dummy *dummy, struct snd_info_buffer *buffer) { static const int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000, }; int i; if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_CONTINUOUS) snd_iprintf(buffer, " continuous"); if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_KNOT) snd_iprintf(buffer, " knot"); for (i = 0; i < ARRAY_SIZE(rates); i++) if (dummy->pcm_hw.rates & (1 << i)) snd_iprintf(buffer, " %d", rates[i]); } #define get_dummy_int_ptr(dummy, ofs) \ (unsigned int *)((char *)&((dummy)->pcm_hw) + (ofs)) #define get_dummy_ll_ptr(dummy, ofs) \ (unsigned long long *)((char *)&((dummy)->pcm_hw) + (ofs)) struct dummy_hw_field { const char *name; const char *format; unsigned int offset; unsigned int size; }; #define FIELD_ENTRY(item, fmt) { \ .name = #item, \ .format = fmt, \ .offset = offsetof(struct snd_pcm_hardware, item), \ .size = sizeof(dummy_pcm_hardware.item) } static const struct dummy_hw_field fields[] = { FIELD_ENTRY(formats, "%#llx"), FIELD_ENTRY(rates, "%#x"), FIELD_ENTRY(rate_min, "%d"), FIELD_ENTRY(rate_max, "%d"), FIELD_ENTRY(channels_min, "%d"), FIELD_ENTRY(channels_max, "%d"), FIELD_ENTRY(buffer_bytes_max, "%ld"), FIELD_ENTRY(period_bytes_min, "%ld"), FIELD_ENTRY(period_bytes_max, "%ld"), FIELD_ENTRY(periods_min, "%d"), FIELD_ENTRY(periods_max, "%d"), }; static void dummy_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dummy *dummy = entry->private_data; int i; for (i = 0; i < ARRAY_SIZE(fields); i++) { snd_iprintf(buffer, "%s ", fields[i].name); if (fields[i].size == sizeof(int)) snd_iprintf(buffer, fields[i].format, *get_dummy_int_ptr(dummy, fields[i].offset)); else snd_iprintf(buffer, fields[i].format, *get_dummy_ll_ptr(dummy, fields[i].offset)); if (!strcmp(fields[i].name, "formats")) print_formats(dummy, buffer); else if (!strcmp(fields[i].name, "rates")) print_rates(dummy, buffer); snd_iprintf(buffer, "\n"); } } static void dummy_proc_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dummy *dummy = entry->private_data; char line[64]; while (!snd_info_get_line(buffer, line, sizeof(line))) { char item[20]; const char *ptr; unsigned long long val; int i; ptr = snd_info_get_str(item, line, sizeof(item)); for (i = 0; i < ARRAY_SIZE(fields); i++) { if (!strcmp(item, fields[i].name)) break; } if (i >= ARRAY_SIZE(fields)) continue; snd_info_get_str(item, ptr, sizeof(item)); if (kstrtoull(item, 0, &val)) continue; if (fields[i].size == sizeof(int)) 
*get_dummy_int_ptr(dummy, fields[i].offset) = val; else *get_dummy_ll_ptr(dummy, fields[i].offset) = val; } } static void dummy_proc_init(struct snd_dummy *chip) { snd_card_rw_proc_new(chip->card, "dummy_pcm", chip, dummy_proc_read, dummy_proc_write); } #else #define dummy_proc_init(x) #endif /* CONFIG_SND_DEBUG && CONFIG_SND_PROC_FS */ static int snd_dummy_probe(struct platform_device *devptr) { struct snd_card *card; struct snd_dummy *dummy; const struct dummy_model *m = NULL, **mdl; int idx, err; int dev = devptr->id; err = snd_devm_card_new(&devptr->dev, index[dev], id[dev], THIS_MODULE, sizeof(struct snd_dummy), &card); if (err < 0) return err; dummy = card->private_data; dummy->card = card; for (mdl = dummy_models; *mdl && model[dev]; mdl++) { if (strcmp(model[dev], (*mdl)->name) == 0) { pr_info("snd-dummy: Using model '%s' for card %i\n", (*mdl)->name, card->number); m = dummy->model = *mdl; break; } } for (idx = 0; idx < MAX_PCM_DEVICES && idx < pcm_devs[dev]; idx++) { if (pcm_substreams[dev] < 1) pcm_substreams[dev] = 1; if (pcm_substreams[dev] > MAX_PCM_SUBSTREAMS) pcm_substreams[dev] = MAX_PCM_SUBSTREAMS; err = snd_card_dummy_pcm(dummy, idx, pcm_substreams[dev]); if (err < 0) return err; } dummy->pcm_hw = dummy_pcm_hardware; if (m) { if (m->formats) dummy->pcm_hw.formats = m->formats; if (m->buffer_bytes_max) dummy->pcm_hw.buffer_bytes_max = m->buffer_bytes_max; if (m->period_bytes_min) dummy->pcm_hw.period_bytes_min = m->period_bytes_min; if (m->period_bytes_max) dummy->pcm_hw.period_bytes_max = m->period_bytes_max; if (m->periods_min) dummy->pcm_hw.periods_min = m->periods_min; if (m->periods_max) dummy->pcm_hw.periods_max = m->periods_max; if (m->rates) dummy->pcm_hw.rates = m->rates; if (m->rate_min) dummy->pcm_hw.rate_min = m->rate_min; if (m->rate_max) dummy->pcm_hw.rate_max = m->rate_max; if (m->channels_min) dummy->pcm_hw.channels_min = m->channels_min; if (m->channels_max) dummy->pcm_hw.channels_max = m->channels_max; } if (mixer_volume_level_min > mixer_volume_level_max) { pr_warn("snd-dummy: Invalid mixer volume level: min=%d, max=%d. 
Fall back to default value.\n", mixer_volume_level_min, mixer_volume_level_max); mixer_volume_level_min = USE_MIXER_VOLUME_LEVEL_MIN; mixer_volume_level_max = USE_MIXER_VOLUME_LEVEL_MAX; } err = snd_card_dummy_new_mixer(dummy); if (err < 0) return err; strcpy(card->driver, "Dummy"); strcpy(card->shortname, "Dummy"); sprintf(card->longname, "Dummy %i", dev + 1); dummy_proc_init(dummy); err = snd_card_register(card); if (err < 0) return err; platform_set_drvdata(devptr, card); return 0; } static int snd_dummy_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); return 0; } static int snd_dummy_resume(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(snd_dummy_pm, snd_dummy_suspend, snd_dummy_resume); #define SND_DUMMY_DRIVER "snd_dummy" static struct platform_driver snd_dummy_driver = { .probe = snd_dummy_probe, .driver = { .name = SND_DUMMY_DRIVER, .pm = &snd_dummy_pm, }, }; static void snd_dummy_unregister_all(void) { int i; for (i = 0; i < ARRAY_SIZE(devices); ++i) platform_device_unregister(devices[i]); platform_driver_unregister(&snd_dummy_driver); free_fake_buffer(); } static int __init alsa_card_dummy_init(void) { int i, cards, err; err = platform_driver_register(&snd_dummy_driver); if (err < 0) return err; err = alloc_fake_buffer(); if (err < 0) { platform_driver_unregister(&snd_dummy_driver); return err; } cards = 0; for (i = 0; i < SNDRV_CARDS; i++) { struct platform_device *device; if (! enable[i]) continue; device = platform_device_register_simple(SND_DUMMY_DRIVER, i, NULL, 0); if (IS_ERR(device)) continue; if (!platform_get_drvdata(device)) { platform_device_unregister(device); continue; } devices[i] = device; cards++; } if (!cards) { #ifdef MODULE pr_err("Dummy soundcard not found or device busy\n"); #endif snd_dummy_unregister_all(); return -ENODEV; } return 0; } static void __exit alsa_card_dummy_exit(void) { snd_dummy_unregister_all(); } module_init(alsa_card_dummy_init) module_exit(alsa_card_dummy_exit)
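The proc handlers above (built only with CONFIG_SND_DEBUG and CONFIG_SND_PROC_FS) accept lines of the form "<field> <value>" for the entries in fields[]. A hedged user-space sketch follows; the proc path is an assumption for the first registered card:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the snd_card_rw_proc_new() entry for card 0. */
	FILE *f = fopen("/proc/asound/card0/dummy_pcm", "w");

	if (!f)
		return 1;
	/* "rate_max" is one of the fields[] names parsed by dummy_proc_write(). */
	fprintf(f, "rate_max 96000\n");
	fclose(f);
	return 0;
}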
2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 // SPDX-License-Identifier: GPL-2.0-only /* * ebt_arp * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * Tim Gardner <timg@tpi.com> * * April, 2002 * */ #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_arp.h> static bool ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_arp_info *info = par->matchinfo; const struct arphdr *ah; struct arphdr _arph; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL) return false; if ((info->bitmask & EBT_ARP_OPCODE) && NF_INVF(info, EBT_ARP_OPCODE, info->opcode != ah->ar_op)) return false; if ((info->bitmask & EBT_ARP_HTYPE) && NF_INVF(info, EBT_ARP_HTYPE, info->htype != ah->ar_hrd)) return false; if ((info->bitmask & EBT_ARP_PTYPE) && NF_INVF(info, EBT_ARP_PTYPE, info->ptype != ah->ar_pro)) return false; if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) { const __be32 *sap, *dap; __be32 saddr, daddr; if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) return false; sap = skb_header_pointer(skb, sizeof(struct arphdr) + ah->ar_hln, sizeof(saddr), &saddr); if (sap == NULL) return false; dap = skb_header_pointer(skb, sizeof(struct arphdr) + 2*ah->ar_hln+sizeof(saddr), sizeof(daddr), &daddr); if (dap == NULL) return false; if ((info->bitmask & EBT_ARP_SRC_IP) && NF_INVF(info, EBT_ARP_SRC_IP, info->saddr != (*sap & info->smsk))) return false; if ((info->bitmask & EBT_ARP_DST_IP) && NF_INVF(info, EBT_ARP_DST_IP, info->daddr != (*dap & info->dmsk))) return false; if ((info->bitmask & EBT_ARP_GRAT) && NF_INVF(info, EBT_ARP_GRAT, *dap != *sap)) return false; } if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) { const unsigned char *mp; unsigned char _mac[ETH_ALEN]; if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER)) return false; if (info->bitmask & EBT_ARP_SRC_MAC) { mp = skb_header_pointer(skb, sizeof(struct arphdr), sizeof(_mac), &_mac); if (mp == NULL) return false; if (NF_INVF(info, EBT_ARP_SRC_MAC, !ether_addr_equal_masked(mp, info->smaddr, info->smmsk))) return false; } if (info->bitmask & EBT_ARP_DST_MAC) { mp = skb_header_pointer(skb, sizeof(struct arphdr) + ah->ar_hln + ah->ar_pln, sizeof(_mac), &_mac); if (mp == NULL) return false; if (NF_INVF(info, EBT_ARP_DST_MAC, !ether_addr_equal_masked(mp, info->dmaddr, info->dmmsk))) return false; } } return true; } static int ebt_arp_mt_check(const struct xt_mtchk_param *par) { const struct ebt_arp_info *info = par->matchinfo; const struct ebt_entry *e = par->entryinfo; if ((e->ethproto != htons(ETH_P_ARP) && e->ethproto != htons(ETH_P_RARP)) || e->invflags & EBT_IPROTO) return -EINVAL; if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK) return -EINVAL; return 0; } static struct xt_match ebt_arp_mt_reg __read_mostly = { .name = "arp", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_arp_mt, .checkentry = ebt_arp_mt_check, .matchsize = sizeof(struct ebt_arp_info), .me = THIS_MODULE, }; static 
int __init ebt_arp_init(void) { return xt_register_match(&ebt_arp_mt_reg); } static void __exit ebt_arp_fini(void) { xt_unregister_match(&ebt_arp_mt_reg); } module_init(ebt_arp_init); module_exit(ebt_arp_fini); MODULE_DESCRIPTION("Ebtables: ARP protocol packet match"); MODULE_LICENSE("GPL");
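A note on the inversion helper used throughout ebt_arp_mt() above: NF_INVF() XORs the raw comparison with the matching bit in info->invflags, so every sub-test can be negated per rule. A small stand-alone sketch of that semantics for the gratuitous-ARP test (LOCAL_NF_INVF and the flag value are simplified local stand-ins for illustration, not the kernel headers):

#include <stdio.h>

#define LOCAL_EBT_ARP_GRAT 0x80	/* illustrative bit value */
#define LOCAL_NF_INVF(invflags, flag, boolean) \
	((boolean) ^ !!((invflags) & (flag)))

int main(void)
{
	unsigned int invflags = 0;	/* rule written without '!' */
	int dst_differs_from_src = 0;	/* gratuitous ARP: sender IP == target IP */

	/* ebt_arp_mt() rejects the packet when this evaluates true, so 0 means "match". */
	printf("reject=%d\n",
	       LOCAL_NF_INVF(invflags, LOCAL_EBT_ARP_GRAT, dst_differs_from_src));
	return 0;
}

With the invert bit set in invflags, the same comparison is flipped and the rule matches everything except gratuitous ARP.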
// SPDX-License-Identifier: GPL-2.0 #include "misc.h" #include "ctree.h" #include "block-rsv.h" #include "space-info.h" #include "transaction.h" #include "block-group.h" #include "fs.h" #include "accessors.h" /* * HOW DO BLOCK RESERVES WORK * * Think of block_rsv's as buckets for logically grouped metadata * reservations. Each block_rsv has a ->size and a ->reserved. ->size is * how large we want our block rsv to be, ->reserved is how much space is * currently reserved for this block reserve. * * ->failfast exists for the truncate case, and is described below. * * NORMAL OPERATION * * -> Reserve * Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill * * We call into btrfs_reserve_metadata_bytes() with our bytes, which is * accounted for in space_info->bytes_may_use, and then add the bytes to * ->reserved, and ->size in the case of btrfs_block_rsv_add. * * ->size is an over-estimation of how much we may use for a particular * operation. 
* * -> Use * Entrance: btrfs_use_block_rsv * * When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv() * to determine the appropriate block_rsv to use, and then verify that * ->reserved has enough space for our tree block allocation. Once * successful we subtract fs_info->nodesize from ->reserved. * * -> Finish * Entrance: btrfs_block_rsv_release * * We are finished with our operation, subtract our individual reservation * from ->size, and then subtract ->size from ->reserved and free up the * excess if there is any. * * There is some logic here to refill the delayed refs rsv or the global rsv * as needed, otherwise the excess is subtracted from * space_info->bytes_may_use. * * TYPES OF BLOCK RESERVES * * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK * These behave normally, as described above, just within the confines of the * lifetime of their particular operation (transaction for the whole trans * handle lifetime, for example). * * BLOCK_RSV_GLOBAL * It is impossible to properly account for all the space that may be required * to make our extent tree updates. This block reserve acts as an overflow * buffer in case our delayed refs reserve does not reserve enough space to * update the extent tree. * * We can steal from this in some cases as well, notably on evict() or * truncate() in order to help users recover from ENOSPC conditions. * * BLOCK_RSV_DELALLOC * The individual item sizes are determined by the per-inode size * calculations, which are described with the delalloc code. This is pretty * straightforward, it's just the calculation of ->size encodes a lot of * different items, and thus it gets used when updating inodes, inserting file * extents, and inserting checksums. * * BLOCK_RSV_DELREFS * We keep a running tally of how many delayed refs we have on the system. * We assume each one of these delayed refs are going to use a full * reservation. We use the transaction items and pre-reserve space for every * operation, and use this reservation to refill any gap between ->size and * ->reserved that may exist. * * From there it's straightforward, removing a delayed ref means we remove its * count from ->size and free up reservations as necessary. Since this is * the most dynamic block reserve in the system, we will try to refill this * block reserve first with any excess returned by any other block reserve. * * BLOCK_RSV_EMPTY * This is the fallback block reserve to make us try to reserve space if we * don't have a specific bucket for this allocation. It is mostly used for * updating the device tree and such, since that is a separate pool we're * content to just reserve space from the space_info on demand. * * BLOCK_RSV_TEMP * This is used by things like truncate and iput. We will temporarily * allocate a block reserve, set it to some size, and then truncate bytes * until we have no space left. With ->failfast set we'll simply return * ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and try * to make a new reservation. This is because these operations are * unbounded, so we want to do as much work as we can, and then back off and * re-reserve. 
*/ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, struct btrfs_block_rsv *dest, u64 num_bytes, u64 *qgroup_to_release_ret) { struct btrfs_space_info *space_info = block_rsv->space_info; u64 qgroup_to_release = 0; u64 ret; spin_lock(&block_rsv->lock); if (num_bytes == (u64)-1) { num_bytes = block_rsv->size; qgroup_to_release = block_rsv->qgroup_rsv_size; } block_rsv->size -= num_bytes; if (block_rsv->reserved >= block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; block_rsv->reserved = block_rsv->size; block_rsv->full = true; } else { num_bytes = 0; } if (qgroup_to_release_ret && block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { qgroup_to_release = block_rsv->qgroup_rsv_reserved - block_rsv->qgroup_rsv_size; block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; } else { qgroup_to_release = 0; } spin_unlock(&block_rsv->lock); ret = num_bytes; if (num_bytes > 0) { if (dest) { spin_lock(&dest->lock); if (!dest->full) { u64 bytes_to_add; bytes_to_add = dest->size - dest->reserved; bytes_to_add = min(num_bytes, bytes_to_add); dest->reserved += bytes_to_add; if (dest->reserved >= dest->size) dest->full = true; num_bytes -= bytes_to_add; } spin_unlock(&dest->lock); } if (num_bytes) btrfs_space_info_free_bytes_may_use(fs_info, space_info, num_bytes); } if (qgroup_to_release_ret) *qgroup_to_release_ret = qgroup_to_release; return ret; } int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src, struct btrfs_block_rsv *dst, u64 num_bytes, bool update_size) { int ret; ret = btrfs_block_rsv_use_bytes(src, num_bytes); if (ret) return ret; btrfs_block_rsv_add_bytes(dst, num_bytes, update_size); return 0; } void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type) { memset(rsv, 0, sizeof(*rsv)); spin_lock_init(&rsv->lock); rsv->type = type; } void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type) { btrfs_init_block_rsv(rsv, type); rsv->space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); } struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info, enum btrfs_rsv_type type) { struct btrfs_block_rsv *block_rsv; block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS); if (!block_rsv) return NULL; btrfs_init_metadata_block_rsv(fs_info, block_rsv, type); return block_rsv; } void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv) { if (!rsv) return; btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL); kfree(rsv); } int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u64 num_bytes, enum btrfs_reserve_flush_enum flush) { int ret; if (num_bytes == 0) return 0; ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, num_bytes, flush); if (!ret) btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true); return ret; } int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent) { u64 num_bytes = 0; int ret = -ENOSPC; spin_lock(&block_rsv->lock); num_bytes = mult_perc(block_rsv->size, min_percent); if (block_rsv->reserved >= num_bytes) ret = 0; spin_unlock(&block_rsv->lock); return ret; } int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u64 num_bytes, enum btrfs_reserve_flush_enum flush) { int ret = -ENOSPC; if (!block_rsv) return 0; spin_lock(&block_rsv->lock); if (block_rsv->reserved >= num_bytes) ret = 0; else num_bytes -= block_rsv->reserved; spin_unlock(&block_rsv->lock); 
if (!ret) return 0; ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, num_bytes, flush); if (!ret) { btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false); return 0; } return ret; } u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u64 num_bytes, u64 *qgroup_to_release) { struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv; struct btrfs_block_rsv *target = NULL; /* * If we are a delayed block reserve then push to the global rsv, * otherwise dump into the global delayed reserve if it is not full. */ if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS) target = global_rsv; else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv)) target = delayed_rsv; if (target && block_rsv->space_info != target->space_info) target = NULL; return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes, qgroup_to_release); } int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes) { int ret = -ENOSPC; spin_lock(&block_rsv->lock); if (block_rsv->reserved >= num_bytes) { block_rsv->reserved -= num_bytes; if (block_rsv->reserved < block_rsv->size) block_rsv->full = false; ret = 0; } spin_unlock(&block_rsv->lock); return ret; } void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes, bool update_size) { spin_lock(&block_rsv->lock); block_rsv->reserved += num_bytes; if (update_size) block_rsv->size += num_bytes; else if (block_rsv->reserved >= block_rsv->size) block_rsv->full = true; spin_unlock(&block_rsv->lock); } void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info) { struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; struct btrfs_space_info *sinfo = block_rsv->space_info; struct btrfs_root *root, *tmp; u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item); unsigned int min_items = 1; /* * The global block rsv is based on the size of the extent tree, the * checksum tree and the root tree. If the fs is empty we want to set * it to a minimal amount for safety. * * We also are going to need to modify the minimum of the tree root and * any global roots we could touch. */ read_lock(&fs_info->global_root_lock); rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree, rb_node) { if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID || btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID || btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) { num_bytes += btrfs_root_used(&root->root_item); min_items++; } } read_unlock(&fs_info->global_root_lock); if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) { num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item); min_items++; } if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) { num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item); min_items++; } /* * But we also want to reserve enough space so we can do the fallback * global reserve for an unlink, which is an additional * BTRFS_UNLINK_METADATA_UNITS items. * * But we also need space for the delayed ref updates from the unlink, * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for * each unlink metadata item. 
*/ min_items += BTRFS_UNLINK_METADATA_UNITS; num_bytes = max_t(u64, num_bytes, btrfs_calc_insert_metadata_size(fs_info, min_items) + btrfs_calc_delayed_ref_bytes(fs_info, BTRFS_UNLINK_METADATA_UNITS)); spin_lock(&sinfo->lock); spin_lock(&block_rsv->lock); block_rsv->size = min_t(u64, num_bytes, SZ_512M); if (block_rsv->reserved < block_rsv->size) { num_bytes = block_rsv->size - block_rsv->reserved; btrfs_space_info_update_bytes_may_use(fs_info, sinfo, num_bytes); block_rsv->reserved = block_rsv->size; } else if (block_rsv->reserved > block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; btrfs_space_info_update_bytes_may_use(fs_info, sinfo, -num_bytes); block_rsv->reserved = block_rsv->size; btrfs_try_granting_tickets(fs_info, sinfo); } block_rsv->full = (block_rsv->reserved == block_rsv->size); if (block_rsv->size >= sinfo->total_bytes) sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&block_rsv->lock); spin_unlock(&sinfo->lock); } void btrfs_init_root_block_rsv(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; switch (btrfs_root_id(root)) { case BTRFS_CSUM_TREE_OBJECTID: case BTRFS_EXTENT_TREE_OBJECTID: case BTRFS_FREE_SPACE_TREE_OBJECTID: case BTRFS_BLOCK_GROUP_TREE_OBJECTID: case BTRFS_RAID_STRIPE_TREE_OBJECTID: root->block_rsv = &fs_info->delayed_refs_rsv; break; case BTRFS_ROOT_TREE_OBJECTID: case BTRFS_DEV_TREE_OBJECTID: case BTRFS_QUOTA_TREE_OBJECTID: root->block_rsv = &fs_info->global_block_rsv; break; case BTRFS_CHUNK_TREE_OBJECTID: root->block_rsv = &fs_info->chunk_block_rsv; break; default: root->block_rsv = NULL; break; } } void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *space_info; space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); fs_info->chunk_block_rsv.space_info = space_info; space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); fs_info->global_block_rsv.space_info = space_info; fs_info->trans_block_rsv.space_info = space_info; fs_info->empty_block_rsv.space_info = space_info; fs_info->delayed_block_rsv.space_info = space_info; fs_info->delayed_refs_rsv.space_info = space_info; btrfs_update_global_block_rsv(fs_info); } void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info) { btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1, NULL); WARN_ON(fs_info->trans_block_rsv.size > 0); WARN_ON(fs_info->trans_block_rsv.reserved > 0); WARN_ON(fs_info->chunk_block_rsv.size > 0); WARN_ON(fs_info->chunk_block_rsv.reserved > 0); WARN_ON(fs_info->delayed_block_rsv.size > 0); WARN_ON(fs_info->delayed_block_rsv.reserved > 0); WARN_ON(fs_info->delayed_refs_rsv.reserved > 0); WARN_ON(fs_info->delayed_refs_rsv.size > 0); } static struct btrfs_block_rsv *get_block_rsv( const struct btrfs_trans_handle *trans, const struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *block_rsv = NULL; if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || (root == fs_info->uuid_root) || (trans->adding_csums && btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID)) block_rsv = trans->block_rsv; if (!block_rsv) block_rsv = root->block_rsv; if (!block_rsv) block_rsv = &fs_info->empty_block_rsv; return block_rsv; } struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *block_rsv; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; int ret; bool global_updated = false; block_rsv = 
get_block_rsv(trans, root); if (unlikely(btrfs_block_rsv_size(block_rsv) == 0)) goto try_reserve; again: ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; if (block_rsv->failfast) return ERR_PTR(ret); if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) { global_updated = true; btrfs_update_global_block_rsv(fs_info); goto again; } /* * The global reserve still exists to save us from ourselves, so don't * warn_on if we are short on our delayed refs reserve. */ if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL * 10, /*DEFAULT_RATELIMIT_BURST*/ 1); if (__ratelimit(&_rs)) WARN(1, KERN_DEBUG "BTRFS: block rsv %d returned %d\n", block_rsv->type, ret); } try_reserve: ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize, BTRFS_RESERVE_NO_FLUSH); if (!ret) return block_rsv; /* * If we couldn't reserve metadata bytes try and use some from * the global reserve if its space type is the same as the global * reservation. */ if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL && block_rsv->space_info == global_rsv->space_info) { ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize); if (!ret) return global_rsv; } /* * All hope is lost, but of course our reservations are overly * pessimistic, so instead of possibly having an ENOSPC abort here, try * one last time to force a reservation if there's enough actual space * on disk to make the reservation. */ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize, BTRFS_RESERVE_FLUSH_EMERGENCY); if (!ret) return block_rsv; return ERR_PTR(ret); } int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv) { u64 needed_bytes; int ret; /* 1 for slack space, 1 for updating the inode */ needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) + btrfs_calc_metadata_size(fs_info, 1); spin_lock(&rsv->lock); if (rsv->reserved < needed_bytes) ret = -ENOSPC; else ret = 0; spin_unlock(&rsv->lock); return ret; }
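The reserve / use / release flow documented at the top of this file boils down to simple arithmetic on ->size and ->reserved. The standalone sketch below models that arithmetic in plain userspace C; it is only an illustration of the accounting described above, not kernel code, and the demo_* names (plus the assumed 16 KiB nodesize) are hypothetical.

/*
 * Standalone model of the ->size / ->reserved accounting.  The arithmetic
 * mirrors btrfs_block_rsv_add_bytes(), btrfs_block_rsv_use_bytes() and
 * block_rsv_release_bytes() above.  Build with: cc -o rsv_demo rsv_demo.c
 */
#include <stdint.h>
#include <stdio.h>

struct demo_rsv {
        uint64_t size;          /* how much we want reserved */
        uint64_t reserved;      /* how much is actually reserved */
};

/* Reserve: grow ->reserved and, for btrfs_block_rsv_add-style calls, ->size. */
static void demo_rsv_add(struct demo_rsv *r, uint64_t bytes, int update_size)
{
        r->reserved += bytes;
        if (update_size)
                r->size += bytes;
}

/* Use: consume from ->reserved, fail (like -ENOSPC) if not enough. */
static int demo_rsv_use(struct demo_rsv *r, uint64_t bytes)
{
        if (r->reserved < bytes)
                return -1;
        r->reserved -= bytes;
        return 0;
}

/* Release: shrink ->size, then hand back any excess of ->reserved over ->size. */
static uint64_t demo_rsv_release(struct demo_rsv *r, uint64_t bytes)
{
        uint64_t excess = 0;

        r->size -= bytes;
        if (r->reserved > r->size) {
                excess = r->reserved - r->size;
                r->reserved = r->size;
        }
        return excess;
}

int main(void)
{
        struct demo_rsv r = { 0, 0 };

        demo_rsv_add(&r, 3 * 16384, 1);         /* reserve for 3 tree blocks */
        demo_rsv_use(&r, 16384);                /* actually allocate one block */
        printf("excess freed: %llu\n",
               (unsigned long long)demo_rsv_release(&r, 3 * 16384));
        return 0;
}

Running it prints the 32 KiB excess that the release step would hand back, which is what block_rsv_release_bytes() passes on to the delayed refs reserve, the global reserve, or space_info->bytes_may_use.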
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Auvitek USB bridge
 *
 * Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
 */

#include "au0828.h"
#include "au8522.h"

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h> #include <linux/mutex.h> /* Due to enum tuner_pad_index */ #include <media/tuner.h> /* * 1 = General debug messages * 2 = USB handling * 4 = I2C related * 8 = Bridge related * 16 = IR related */ int au0828_debug; module_param_named(debug, au0828_debug, int, 0644); MODULE_PARM_DESC(debug, "set debug bitmask: 1=general, 2=USB, 4=I2C, 8=bridge, 16=IR"); static unsigned int disable_usb_speed_check; module_param(disable_usb_speed_check, int, 0444); MODULE_PARM_DESC(disable_usb_speed_check, "override min bandwidth requirement of 480M bps"); #define _AU0828_BULKPIPE 0x03 #define _BULKPIPESIZE 0xffff static int send_control_msg(struct au0828_dev *dev, u16 request, u32 value, u16 index); static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value, u16 index, unsigned char *cp, u16 size); /* USB Direction */ #define CMD_REQUEST_IN 0x00 #define CMD_REQUEST_OUT 0x01 u32 au0828_readreg(struct au0828_dev *dev, u16 reg) { u8 result = 0; recv_control_msg(dev, CMD_REQUEST_IN, 0, reg, &result, 1); dprintk(8, "%s(0x%04x) = 0x%02x\n", __func__, reg, result); return result; } u32 au0828_writereg(struct au0828_dev *dev, u16 reg, u32 val) { dprintk(8, "%s(0x%04x, 0x%02x)\n", __func__, reg, val); return send_control_msg(dev, CMD_REQUEST_OUT, val, reg); } static int send_control_msg(struct au0828_dev *dev, u16 request, u32 value, u16 index) { int status = -ENODEV; if (dev->usbdev) { /* cp must be memory that has been allocated by kmalloc */ status = usb_control_msg(dev->usbdev, usb_sndctrlpipe(dev->usbdev, 0), request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 1000); status = min(status, 0); if (status < 0) { pr_err("%s() Failed sending control message, error %d.\n", __func__, status); } } return status; } static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value, u16 index, unsigned char *cp, u16 size) { int status = -ENODEV; mutex_lock(&dev->mutex); if (dev->usbdev) { status = usb_control_msg(dev->usbdev, usb_rcvctrlpipe(dev->usbdev, 0), request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, dev->ctrlmsg, size, 1000); status = min(status, 0); if (status < 0) { pr_err("%s() Failed receiving control message, error %d.\n", __func__, status); } /* the host controller requires heap allocated memory, which is why we didn't just pass "cp" into usb_control_msg */ memcpy(cp, dev->ctrlmsg, size); } mutex_unlock(&dev->mutex); return status; } #ifdef CONFIG_MEDIA_CONTROLLER static void au0828_media_graph_notify(struct media_entity *new, void *notify_data); #endif static void au0828_unregister_media_device(struct au0828_dev *dev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev = dev->media_dev; struct media_entity_notify *notify, *nextp; if (!mdev || !media_devnode_is_registered(mdev->devnode)) return; /* Remove au0828 entity_notify callbacks */ list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list) { if (notify->notify != au0828_media_graph_notify) continue; media_device_unregister_entity_notify(mdev, notify); } /* clear enable_source, disable_source */ mutex_lock(&mdev->graph_mutex); dev->media_dev->source_priv = NULL; dev->media_dev->enable_source = NULL; dev->media_dev->disable_source = NULL; mutex_unlock(&mdev->graph_mutex); media_device_delete(dev->media_dev, KBUILD_MODNAME, THIS_MODULE); dev->media_dev = NULL; #endif } void au0828_usb_release(struct au0828_dev *dev) { au0828_unregister_media_device(dev); /* I2C */ au0828_i2c_unregister(dev); kfree(dev); } static void 
au0828_usb_disconnect(struct usb_interface *interface) { struct au0828_dev *dev = usb_get_intfdata(interface); dprintk(1, "%s()\n", __func__); /* there is a small window after disconnect, before dev->usbdev is NULL, for poll (e.g: IR) try to access the device and fill the dmesg with error messages. Set the status so poll routines can check and avoid access after disconnect. */ set_bit(DEV_DISCONNECTED, &dev->dev_state); au0828_rc_unregister(dev); /* Digital TV */ au0828_dvb_unregister(dev); usb_set_intfdata(interface, NULL); mutex_lock(&dev->mutex); dev->usbdev = NULL; mutex_unlock(&dev->mutex); if (au0828_analog_unregister(dev)) { /* * No need to call au0828_usb_release() if V4L2 is enabled, * as this is already called via au0828_usb_v4l2_release() */ return; } au0828_usb_release(dev); } static int au0828_media_device_init(struct au0828_dev *dev, struct usb_device *udev) { #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE); if (IS_ERR(mdev)) return PTR_ERR(mdev); dev->media_dev = mdev; #endif return 0; } #ifdef CONFIG_MEDIA_CONTROLLER static void au0828_media_graph_notify(struct media_entity *new, void *notify_data) { struct au0828_dev *dev = notify_data; int ret; struct media_entity *entity, *mixer = NULL, *decoder = NULL; if (!new) { /* * Called during au0828 probe time to connect * entities that were created prior to registering * the notify handler. Find mixer and decoder. */ media_device_for_each_entity(entity, dev->media_dev) { if (entity->function == MEDIA_ENT_F_AUDIO_MIXER) mixer = entity; else if (entity->function == MEDIA_ENT_F_ATV_DECODER) decoder = entity; } goto create_link; } switch (new->function) { case MEDIA_ENT_F_AUDIO_MIXER: mixer = new; if (dev->decoder) decoder = dev->decoder; break; case MEDIA_ENT_F_ATV_DECODER: /* In case, Mixer is added first, find mixer and create link */ media_device_for_each_entity(entity, dev->media_dev) { if (entity->function == MEDIA_ENT_F_AUDIO_MIXER) mixer = entity; } decoder = new; break; default: break; } create_link: if (decoder && mixer) { ret = media_get_pad_index(decoder, MEDIA_PAD_FL_SOURCE, PAD_SIGNAL_AUDIO); if (ret >= 0) ret = media_create_pad_link(decoder, ret, mixer, 0, MEDIA_LNK_FL_ENABLED); if (ret < 0) dev_err(&dev->usbdev->dev, "Mixer Pad Link Create Error: %d\n", ret); } } static bool au0828_is_link_shareable(struct media_entity *owner, struct media_entity *entity) { bool shareable = false; /* Tuner link can be shared by audio, video, and VBI */ switch (owner->function) { case MEDIA_ENT_F_IO_V4L: case MEDIA_ENT_F_AUDIO_CAPTURE: case MEDIA_ENT_F_IO_VBI: if (entity->function == MEDIA_ENT_F_IO_V4L || entity->function == MEDIA_ENT_F_AUDIO_CAPTURE || entity->function == MEDIA_ENT_F_IO_VBI) shareable = true; break; case MEDIA_ENT_F_DTV_DEMOD: default: break; } return shareable; } /* Callers should hold graph_mutex */ static int au0828_enable_source(struct media_entity *entity, struct media_pipeline *pipe) { struct media_entity *source, *find_source; struct media_entity *sink; struct media_link *link, *found_link = NULL; int ret = 0; struct media_device *mdev = entity->graph_obj.mdev; struct au0828_dev *dev; if (!mdev) return -ENODEV; dev = mdev->source_priv; /* * For Audio and V4L2 entity, find the link to which decoder * is the sink. Look for an active link between decoder and * source (tuner/s-video/Composite), if one exists, nothing * to do. If not, look for any active links between source * and any other entity. If one exists, source is busy. 
If * source is free, setup link and start pipeline from source. * For DVB FE entity, the source for the link is the tuner. * Check if tuner is available and setup link and start * pipeline. */ if (entity->function == MEDIA_ENT_F_DTV_DEMOD) { sink = entity; find_source = dev->tuner; } else { /* Analog isn't configured or register failed */ if (!dev->decoder) { ret = -ENODEV; goto end; } sink = dev->decoder; /* * Default input is tuner and default input_type * is AU0828_VMUX_TELEVISION. * * There is a problem when s_input is called to * change the default input. s_input will try to * enable_source before attempting to change the * input on the device, and will end up enabling * default source which is tuner. * * Additional logic is necessary in au0828 to detect * that the input has changed and enable the right * source. au0828 handles this case in its s_input. * It will disable the old source and enable the new * source. * */ if (dev->input_type == AU0828_VMUX_TELEVISION) find_source = dev->tuner; else if (dev->input_type == AU0828_VMUX_SVIDEO || dev->input_type == AU0828_VMUX_COMPOSITE) find_source = &dev->input_ent[dev->input_type]; else { /* unknown input - let user select input */ ret = 0; goto end; } } /* Is there an active link between sink and source */ if (dev->active_link) { if (dev->active_link_owner == entity) { /* This check is necessary to handle multiple * enable_source calls from v4l_ioctls during * the course of video/vbi application run-time. */ pr_debug("%s already owns the tuner\n", entity->name); ret = 0; goto end; } else if (au0828_is_link_shareable(dev->active_link_owner, entity)) { /* Either ALSA or Video own tuner. Sink is the same * for both. Allow sharing the active link between * their common source (tuner) and sink (decoder). * Starting pipeline between sharing entity and sink * will fail with pipe mismatch, while owner has an * active pipeline. Switch pipeline ownership from * user to owner when owner disables the source. */ dev->active_link_shared = true; /* save the user info to use from disable */ dev->active_link_user = entity; dev->active_link_user_pipe = pipe; pr_debug("%s owns the tuner %s can share!\n", dev->active_link_owner->name, entity->name); ret = 0; goto end; } else { ret = -EBUSY; goto end; } } list_for_each_entry(link, &sink->links, list) { /* Check sink, and source */ if (link->sink->entity == sink && link->source->entity == find_source) { found_link = link; break; } } if (!found_link) { ret = -ENODEV; goto end; } /* activate link between source and sink and start pipeline */ source = found_link->source->entity; ret = __media_entity_setup_link(found_link, MEDIA_LNK_FL_ENABLED); if (ret) { pr_err("Activate link from %s->%s. Error %d\n", source->name, sink->name, ret); goto end; } ret = __media_pipeline_start(entity->pads, pipe); if (ret) { pr_err("Start Pipeline: %s->%s Error %d\n", source->name, entity->name, ret); ret = __media_entity_setup_link(found_link, 0); if (ret) pr_err("Deactivate link Error %d\n", ret); goto end; } /* save link state to allow audio and video share the link * and not disable the link while the other is using it. * active_link_owner is used to deactivate the link. 
*/ dev->active_link = found_link; dev->active_link_owner = entity; dev->active_source = source; dev->active_sink = sink; pr_info("Enabled Source: %s->%s->%s Ret %d\n", dev->active_source->name, dev->active_sink->name, dev->active_link_owner->name, ret); end: pr_debug("%s end: ent:%s fnc:%d ret %d\n", __func__, entity->name, entity->function, ret); return ret; } /* Callers should hold graph_mutex */ static void au0828_disable_source(struct media_entity *entity) { int ret = 0; struct media_device *mdev = entity->graph_obj.mdev; struct au0828_dev *dev; if (!mdev) return; dev = mdev->source_priv; if (!dev->active_link) return; /* link is active - stop pipeline from source * (tuner/s-video/Composite) to the entity * When DVB/s-video/Composite owns tuner, it won't be in * shared state. */ if (dev->active_link->sink->entity == dev->active_sink && dev->active_link->source->entity == dev->active_source) { /* * Prevent video from deactivating link when audio * has active pipeline and vice versa. In addition * handle the case when more than one video/vbi * application is sharing the link. */ bool owner_is_audio = false; if (dev->active_link_owner->function == MEDIA_ENT_F_AUDIO_CAPTURE) owner_is_audio = true; if (dev->active_link_shared) { pr_debug("Shared link owner %s user %s %d\n", dev->active_link_owner->name, entity->name, dev->users); /* Handle video device users > 1 * When audio owns the shared link with * more than one video users, avoid * disabling the source and/or switching * the owner until the last disable_source * call from video _close(). Use dev->users to * determine when to switch/disable. */ if (dev->active_link_owner != entity) { /* video device has users > 1 */ if (owner_is_audio && dev->users > 1) return; dev->active_link_user = NULL; dev->active_link_user_pipe = NULL; dev->active_link_shared = false; return; } /* video owns the link and has users > 1 */ if (!owner_is_audio && dev->users > 1) return; /* stop pipeline */ __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); ret = __media_pipeline_start( dev->active_link_user->pads, dev->active_link_user_pipe); if (ret) { pr_err("Start Pipeline: %s->%s %d\n", dev->active_source->name, dev->active_link_user->name, ret); goto deactivate_link; } /* link user is now the owner */ dev->active_link_owner = dev->active_link_user; dev->active_link_user = NULL; dev->active_link_user_pipe = NULL; dev->active_link_shared = false; pr_debug("Pipeline started for %s\n", dev->active_link_owner->name); return; } else if (!owner_is_audio && dev->users > 1) /* video/vbi owns the link and has users > 1 */ return; if (dev->active_link_owner != entity) return; /* stop pipeline */ __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); deactivate_link: ret = __media_entity_setup_link(dev->active_link, 0); if (ret) pr_err("Deactivate link Error %d\n", ret); pr_info("Disabled Source: %s->%s->%s Ret %d\n", dev->active_source->name, dev->active_sink->name, dev->active_link_owner->name, ret); dev->active_link = NULL; dev->active_link_owner = NULL; dev->active_source = NULL; dev->active_sink = NULL; dev->active_link_shared = false; dev->active_link_user = NULL; } } #endif static int au0828_media_device_register(struct au0828_dev *dev, struct usb_device *udev) { #ifdef CONFIG_MEDIA_CONTROLLER int ret; struct media_entity *entity, *demod = NULL; struct media_link *link; if (!dev->media_dev) return 0; if 
(!media_devnode_is_registered(dev->media_dev->devnode)) { /* register media device */ ret = media_device_register(dev->media_dev); if (ret) { media_device_delete(dev->media_dev, KBUILD_MODNAME, THIS_MODULE); dev->media_dev = NULL; dev_err(&udev->dev, "Media Device Register Error: %d\n", ret); return ret; } } else { /* * Call au0828_media_graph_notify() to connect * audio graph to our graph. In this case, audio * driver registered the device and there is no * entity_notify to be called when new entities * are added. Invoke it now. */ au0828_media_graph_notify(NULL, (void *) dev); } /* * Find tuner, decoder and demod. * * The tuner and decoder should be cached, as they'll be used by * au0828_enable_source. * * It also needs to disable the link between tuner and * decoder/demod, to avoid disable step when tuner is requested * by video or audio. Note that this step can't be done until dvb * graph is created during dvb register. */ media_device_for_each_entity(entity, dev->media_dev) { switch (entity->function) { case MEDIA_ENT_F_TUNER: dev->tuner = entity; break; case MEDIA_ENT_F_ATV_DECODER: dev->decoder = entity; break; case MEDIA_ENT_F_DTV_DEMOD: demod = entity; break; } } /* Disable link between tuner->demod and/or tuner->decoder */ if (dev->tuner) { list_for_each_entry(link, &dev->tuner->links, list) { if (demod && link->sink->entity == demod) media_entity_setup_link(link, 0); if (dev->decoder && link->sink->entity == dev->decoder) media_entity_setup_link(link, 0); } } /* register entity_notify callback */ dev->entity_notify.notify_data = (void *) dev; dev->entity_notify.notify = (void *) au0828_media_graph_notify; media_device_register_entity_notify(dev->media_dev, &dev->entity_notify); /* set enable_source */ mutex_lock(&dev->media_dev->graph_mutex); dev->media_dev->source_priv = (void *) dev; dev->media_dev->enable_source = au0828_enable_source; dev->media_dev->disable_source = au0828_disable_source; mutex_unlock(&dev->media_dev->graph_mutex); #endif return 0; } static int au0828_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { int ifnum; int retval = 0; struct au0828_dev *dev; struct usb_device *usbdev = interface_to_usbdev(interface); ifnum = interface->altsetting->desc.bInterfaceNumber; if (ifnum != 0) return -ENODEV; dprintk(1, "%s() vendor id 0x%x device id 0x%x ifnum:%d\n", __func__, le16_to_cpu(usbdev->descriptor.idVendor), le16_to_cpu(usbdev->descriptor.idProduct), ifnum); /* * Make sure we have 480 Mbps of bandwidth, otherwise things like * video stream wouldn't likely work, since 12 Mbps is generally * not enough even for most Digital TV streams. 
*/ if (usbdev->speed != USB_SPEED_HIGH && disable_usb_speed_check == 0) { pr_err("au0828: Device initialization failed.\n"); pr_err("au0828: Device must be connected to a high-speed USB 2.0 port.\n"); return -ENODEV; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { pr_err("%s() Unable to allocate memory\n", __func__); return -ENOMEM; } mutex_init(&dev->lock); mutex_lock(&dev->lock); mutex_init(&dev->mutex); mutex_init(&dev->dvb.lock); dev->usbdev = usbdev; dev->boardnr = id->driver_info; dev->board = au0828_boards[dev->boardnr]; /* Initialize the media controller */ retval = au0828_media_device_init(dev, usbdev); if (retval) { pr_err("%s() au0828_media_device_init failed\n", __func__); mutex_unlock(&dev->lock); kfree(dev); return retval; } retval = au0828_v4l2_device_register(interface, dev); if (retval) { au0828_usb_v4l2_media_release(dev); mutex_unlock(&dev->lock); kfree(dev); return retval; } /* Power Up the bridge */ au0828_write(dev, REG_600, 1 << 4); /* Bring up the GPIO's and supporting devices */ au0828_gpio_setup(dev); /* I2C */ au0828_i2c_register(dev); /* Setup */ au0828_card_setup(dev); /* * Store the pointer to the au0828_dev so it can be accessed in * au0828_usb_disconnect */ usb_set_intfdata(interface, dev); /* Analog TV */ retval = au0828_analog_register(dev, interface); if (retval) { pr_err("%s() au0828_analog_register failed to register on V4L2\n", __func__); mutex_unlock(&dev->lock); goto done; } /* Digital TV */ retval = au0828_dvb_register(dev); if (retval) pr_err("%s() au0828_dvb_register failed\n", __func__); /* Remote controller */ au0828_rc_register(dev); pr_info("Registered device AU0828 [%s]\n", dev->board.name == NULL ? "Unset" : dev->board.name); mutex_unlock(&dev->lock); retval = au0828_media_device_register(dev, usbdev); done: if (retval < 0) au0828_usb_disconnect(interface); return retval; } static int au0828_suspend(struct usb_interface *interface, pm_message_t message) { struct au0828_dev *dev = usb_get_intfdata(interface); if (!dev) return 0; pr_info("Suspend\n"); au0828_rc_suspend(dev); au0828_v4l2_suspend(dev); au0828_dvb_suspend(dev); /* FIXME: should suspend also ATV/DTV */ return 0; } static int au0828_resume(struct usb_interface *interface) { struct au0828_dev *dev = usb_get_intfdata(interface); if (!dev) return 0; pr_info("Resume\n"); /* Power Up the bridge */ au0828_write(dev, REG_600, 1 << 4); /* Bring up the GPIO's and supporting devices */ au0828_gpio_setup(dev); au0828_rc_resume(dev); au0828_v4l2_resume(dev); au0828_dvb_resume(dev); /* FIXME: should resume also ATV/DTV */ return 0; } static struct usb_driver au0828_usb_driver = { .name = KBUILD_MODNAME, .probe = au0828_usb_probe, .disconnect = au0828_usb_disconnect, .id_table = au0828_usb_id_table, .suspend = au0828_suspend, .resume = au0828_resume, .reset_resume = au0828_resume, }; static int __init au0828_init(void) { int ret; if (au0828_debug & 1) pr_info("%s() Debugging is enabled\n", __func__); if (au0828_debug & 2) pr_info("%s() USB Debugging is enabled\n", __func__); if (au0828_debug & 4) pr_info("%s() I2C Debugging is enabled\n", __func__); if (au0828_debug & 8) pr_info("%s() Bridge Debugging is enabled\n", __func__); if (au0828_debug & 16) pr_info("%s() IR Debugging is enabled\n", __func__); pr_info("au0828 driver loaded\n"); ret = usb_register(&au0828_usb_driver); if (ret) pr_err("usb_register failed, error = %d\n", ret); return ret; } static void __exit au0828_exit(void) { usb_deregister(&au0828_usb_driver); } module_init(au0828_init); module_exit(au0828_exit); 
MODULE_DESCRIPTION("Driver for Auvitek AU0828 based products"); MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.0.3");
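The debug module parameter described near the top of this driver is a plain bitmask, so levels can be combined by adding them. The minimal userspace sketch below only mirrors the documented bit values (1=general, 2=USB, 4=I2C, 8=bridge, 16=IR); the AU_DBG_* names are invented for the illustration and are not part of the driver.

/* Standalone sketch of decoding the au0828 "debug" bitmask. */
#include <stdio.h>

#define AU_DBG_GENERAL  1
#define AU_DBG_USB      2
#define AU_DBG_I2C      4
#define AU_DBG_BRIDGE   8
#define AU_DBG_IR      16

int main(void)
{
        int debug = 5;  /* example value, as would be passed via debug= */

        if (debug & AU_DBG_GENERAL)
                printf("general debug enabled\n");
        if (debug & AU_DBG_USB)
                printf("USB debug enabled\n");
        if (debug & AU_DBG_I2C)
                printf("I2C debug enabled\n");
        if (debug & AU_DBG_BRIDGE)
                printf("bridge debug enabled\n");
        if (debug & AU_DBG_IR)
                printf("IR debug enabled\n");
        return 0;
}

For example, debug=5 (1 + 4) would enable the general and I2C messages.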
71 45 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef BTRFS_MISC_H #define BTRFS_MISC_H #include <linux/types.h> #include <linux/bitmap.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/math64.h> #include <linux/rbtree.h> /* * Enumerate bits using enum autoincrement. Define the @name as the n-th bit. */ #define ENUM_BIT(name) \ __ ## name ## _BIT, \ name = (1U << __ ## name ## _BIT), \ __ ## name ## _SEQ = __ ## name ## _BIT static inline void cond_wake_up(struct wait_queue_head *wq) { /* * This implies a full smp_mb barrier, see comments for * waitqueue_active why. */ if (wq_has_sleeper(wq)) wake_up(wq); } static inline void cond_wake_up_nomb(struct wait_queue_head *wq) { /* * Special case for conditional wakeup where the barrier required for * waitqueue_active is implied by some of the preceding code. Eg. one * of such atomic operations (atomic_dec_and_return, ...), or a * unlock/lock sequence, etc. */ if (waitqueue_active(wq)) wake_up(wq); } static inline u64 mult_perc(u64 num, u32 percent) { return div_u64(num * percent, 100); } /* Copy of is_power_of_two that is 64bit safe */ static inline bool is_power_of_two_u64(u64 n) { return n != 0 && (n & (n - 1)) == 0; } static inline bool has_single_bit_set(u64 n) { return is_power_of_two_u64(n); } /* * Simple bytenr based rb_tree relate structures * * Any structure wants to use bytenr as single search index should have their * structure start with these members. */ struct rb_simple_node { struct rb_node rb_node; u64 bytenr; }; static inline struct rb_node *rb_simple_search(const struct rb_root *root, u64 bytenr) { struct rb_node *node = root->rb_node; struct rb_simple_node *entry; while (node) { entry = rb_entry(node, struct rb_simple_node, rb_node); if (bytenr < entry->bytenr) node = node->rb_left; else if (bytenr > entry->bytenr) node = node->rb_right; else return node; } return NULL; } /* * Search @root from an entry that starts or comes after @bytenr. * * @root: the root to search. * @bytenr: bytenr to search from. * * Return the rb_node that start at or after @bytenr. If there is no entry at * or after @bytner return NULL. 
*/ static inline struct rb_node *rb_simple_search_first(const struct rb_root *root, u64 bytenr) { struct rb_node *node = root->rb_node, *ret = NULL; struct rb_simple_node *entry, *ret_entry = NULL; while (node) { entry = rb_entry(node, struct rb_simple_node, rb_node); if (bytenr < entry->bytenr) { if (!ret || entry->bytenr < ret_entry->bytenr) { ret = node; ret_entry = entry; } node = node->rb_left; } else if (bytenr > entry->bytenr) { node = node->rb_right; } else { return node; } } return ret; } static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr, struct rb_node *node) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct rb_simple_node *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct rb_simple_node, rb_node); if (bytenr < entry->bytenr) p = &(*p)->rb_left; else if (bytenr > entry->bytenr) p = &(*p)->rb_right; else return parent; } rb_link_node(node, parent, p); rb_insert_color(node, root); return NULL; } static inline bool bitmap_test_range_all_set(const unsigned long *addr, unsigned long start, unsigned long nbits) { unsigned long found_zero; found_zero = find_next_zero_bit(addr, start + nbits, start); return (found_zero == start + nbits); } static inline bool bitmap_test_range_all_zero(const unsigned long *addr, unsigned long start, unsigned long nbits) { unsigned long found_set; found_set = find_next_bit(addr, start + nbits, start); return (found_set == start + nbits); } #endif
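The ENUM_BIT() macro at the top of this header relies on enum auto-increment: each use consumes one enumerator for the bit index, one for the flag value, and a trailing _SEQ enumerator to keep the counter in step. The small standalone program below shows the effect with an ordinary C compiler; the demo_flags names are hypothetical.

/* Standalone illustration of the ENUM_BIT() pattern defined above. */
#include <stdio.h>

#define ENUM_BIT(name)                                  \
        __ ## name ## _BIT,                             \
        name = (1U << __ ## name ## _BIT),              \
        __ ## name ## _SEQ = __ ## name ## _BIT

enum demo_flags {
        ENUM_BIT(DEMO_FLAG_A),  /* 1 << 0 */
        ENUM_BIT(DEMO_FLAG_B),  /* 1 << 1 */
        ENUM_BIT(DEMO_FLAG_C),  /* 1 << 2 */
};

int main(void)
{
        printf("A=%d B=%d C=%d\n", DEMO_FLAG_A, DEMO_FLAG_B, DEMO_FLAG_C);
        return 0;
}

Successive ENUM_BIT() uses therefore yield 1, 2, 4, ... without spelling out the shifts by hand.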
4 4 95 61 63 62 770 59 516 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef _PROTO_MEMORY_H #define _PROTO_MEMORY_H #include <net/sock.h> #include <net/hotdata.h> /* 1 MB per cpu, in page units */ #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT)) static inline bool sk_has_memory_pressure(const struct sock *sk) { return sk->sk_prot->memory_pressure != NULL; } static inline bool proto_memory_pressure(const struct proto *prot) { if (!prot->memory_pressure) return false; return !!READ_ONCE(*prot->memory_pressure); } static inline bool sk_under_global_memory_pressure(const struct sock *sk) { return proto_memory_pressure(sk->sk_prot); } static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) return false; if (mem_cgroup_sockets_enabled && sk->sk_memcg && mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long proto_memory_allocated(const struct proto *prot) { return max(0L, atomic_long_read(prot->memory_allocated)); } static inline long sk_memory_allocated(const struct sock *sk) { return proto_memory_allocated(sk->sk_prot); } static inline void proto_memory_pcpu_drain(struct proto *proto) { int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0); if (val) atomic_long_add(val, proto->memory_allocated); } static inline void sk_memory_allocated_add(const struct sock *sk, int val) { struct proto *proto = sk->sk_prot; val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val); if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv))) proto_memory_pcpu_drain(proto); } static inline void sk_memory_allocated_sub(const struct sock *sk, int val) { struct proto *proto = sk->sk_prot; val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val); if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv))) proto_memory_pcpu_drain(proto); } #endif /* _PROTO_MEMORY_H */
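sk_memory_allocated_add() and sk_memory_allocated_sub() above avoid touching the shared atomic counter on every charge by batching into a per-CPU counter and only draining it once the batch exceeds sysctl_mem_pcpu_rsv. The sketch below models that batching in plain, single-threaded userspace C (so the "per-CPU" counter is just a local variable); the demo_* names and the threshold value are assumptions for the illustration.

/* Standalone model of the per-CPU forward-alloc batching described above. */
#include <stdio.h>

#define DEMO_PCPU_RESERVE 256   /* stand-in for sysctl_mem_pcpu_rsv, in pages */

static long demo_global;        /* models proto->memory_allocated */
static long demo_pcpu;          /* models *proto->per_cpu_fw_alloc */

static void demo_drain(void)
{
        demo_global += demo_pcpu;       /* the expensive atomic_long_add() step */
        demo_pcpu = 0;
}

static void demo_charge(int pages)
{
        demo_pcpu += pages;
        if (demo_pcpu >= DEMO_PCPU_RESERVE || demo_pcpu <= -DEMO_PCPU_RESERVE)
                demo_drain();
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                demo_charge(100);       /* ten charges of 100 pages each */
        printf("global=%ld pcpu=%ld\n", demo_global, demo_pcpu);
        return 0;
}

Most charges only touch the cheap local counter; the shared counter is updated a few times per reserve-sized batch instead of once per call.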
2 7 7 7 6 2 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2018 Christoph Hellwig. * * DMA operations that map physical memory directly without using an IOMMU. */ #ifndef _KERNEL_DMA_DIRECT_H #define _KERNEL_DMA_DIRECT_H #include <linux/dma-direct.h> #include <linux/memremap.h> int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); bool dma_direct_can_mmap(struct device *dev); int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); bool dma_direct_all_ram_mapped(struct device *dev); size_t dma_direct_max_mapping_size(struct device *dev); #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ defined(CONFIG_SWIOTLB) void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir); #else static inline void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) { } #endif #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ defined(CONFIG_SWIOTLB) void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs); void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir); #else static inline void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs) { } static inline void dma_direct_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir) { } #endif static inline void dma_direct_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { phys_addr_t paddr = dma_to_phys(dev, addr); swiotlb_sync_single_for_device(dev, paddr, size, dir); if (!dev_is_dma_coherent(dev)) arch_sync_dma_for_device(paddr, size, dir); } static inline void dma_direct_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { phys_addr_t paddr = dma_to_phys(dev, addr); if (!dev_is_dma_coherent(dev)) { arch_sync_dma_for_cpu(paddr, size, dir); arch_sync_dma_for_cpu_all(); } swiotlb_sync_single_for_cpu(dev, paddr, size, dir); if (dir == DMA_FROM_DEVICE) arch_dma_mark_clean(paddr, size); } static inline dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { phys_addr_t phys = page_to_phys(page) + offset; dma_addr_t dma_addr = phys_to_dma(dev, phys); if (is_swiotlb_force_bounce(dev)) { if (is_pci_p2pdma_page(page)) return DMA_MAPPING_ERROR; return swiotlb_map(dev, phys, size, dir, attrs); } if (unlikely(!dma_capable(dev, dma_addr, size, true)) || dma_kmalloc_needs_bounce(dev, size, dir)) { 
if (is_pci_p2pdma_page(page)) return DMA_MAPPING_ERROR; if (is_swiotlb_active(dev)) return swiotlb_map(dev, phys, size, dir, attrs); dev_WARN_ONCE(dev, 1, "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); return DMA_MAPPING_ERROR; } if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) arch_sync_dma_for_device(phys, size, dir); return dma_addr; } static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { phys_addr_t phys = dma_to_phys(dev, addr); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) dma_direct_sync_single_for_cpu(dev, addr, size, dir); swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); } #endif /* _KERNEL_DMA_DIRECT_H */
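dma_direct_map_page() above makes a three-way choice: map the physical address directly when the device's DMA mask covers it, bounce through SWIOTLB when it does not (or when bouncing is forced), and fail with DMA_MAPPING_ERROR otherwise. The standalone model below captures only that decision; the identity phys-to-DMA translation, the addresses, and the demo_* names are hypothetical.

/* Standalone model of the mapping decision made by dma_direct_map_page(). */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAPPING_ERROR UINT64_MAX

static int demo_dma_capable(uint64_t mask, uint64_t addr, uint64_t size)
{
        return addr + size - 1 <= mask;
}

static uint64_t demo_map(uint64_t mask, uint64_t phys, uint64_t size,
                         int swiotlb_available)
{
        uint64_t dma_addr = phys;       /* identity phys_to_dma() for the model */

        if (demo_dma_capable(mask, dma_addr, size))
                return dma_addr;        /* direct mapping */
        if (swiotlb_available)
                return 0x1000000;       /* pretend bounce-buffer address */
        return DEMO_MAPPING_ERROR;      /* like DMA_MAPPING_ERROR */
}

int main(void)
{
        uint64_t mask32 = 0xffffffffULL;        /* 32-bit capable device */

        printf("low buffer : %#llx\n",
               (unsigned long long)demo_map(mask32, 0x10000000, 4096, 1));
        printf("high buffer: %#llx\n",
               (unsigned long long)demo_map(mask32, 0x200000000ULL, 4096, 1));
        return 0;
}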
10 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_MROUTE6_H #define __LINUX_MROUTE6_H #include <linux/pim.h> #include <linux/skbuff.h> /* for struct sk_buff_head */ #include <net/net_namespace.h> #include <uapi/linux/mroute6.h> #include <linux/mroute_base.h> #include <linux/sockptr.h> #include <net/fib_rules.h> #ifdef CONFIG_IPV6_MROUTE static inline int ip6_mroute_opt(int opt) { return (opt >= MRT6_BASE) && (opt <= MRT6_MAX); } #else static inline int ip6_mroute_opt(int opt) { return 0; } #endif struct sock; #ifdef CONFIG_IPV6_MROUTE extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); int ip6mr_ioctl(struct sock *sk, int cmd, void *arg); #else static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } static inline int ip6_mroute_getsockopt(struct sock *sock, int optname, sockptr_t optval, sockptr_t optlen) { return -ENOPROTOOPT; } static inline int ip6mr_ioctl(struct sock *sk, int cmd, void *arg) { return -ENOIOCTLCMD; } static inline int ip6_mr_init(void) { return 0; } static inline void ip6_mr_cleanup(void) { return; } #endif #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES bool ip6mr_rule_default(const struct fib_rule *rule); #else static inline bool ip6mr_rule_default(const struct fib_rule *rule) { return true; } #endif #define VIFF_STATIC 0x8000 struct mfc6_cache_cmp_arg { struct in6_addr mf6c_mcastgrp; struct in6_addr mf6c_origin; }; struct mfc6_cache { struct mr_mfc _c; union { struct { struct in6_addr mf6c_mcastgrp; struct in6_addr mf6c_origin; }; struct mfc6_cache_cmp_arg cmparg; }; }; #define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */ struct rtmsg; extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, u32 portid); #ifdef CONFIG_IPV6_MROUTE bool mroute6_is_socket(struct net *net, struct sk_buff *skb); extern int ip6mr_sk_done(struct sock *sk); static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { switch (cmd) { /* These userspace buffers will be consumed by ip6mr_ioctl() */ case SIOCGETMIFCNT_IN6: { struct sioc_mif_req6 buffer; return sock_ioctl_inout(sk, cmd, arg, &buffer, sizeof(buffer)); } case SIOCGETSGCNT_IN6: { struct sioc_sg_req6 buffer; return sock_ioctl_inout(sk, cmd, arg, &buffer, sizeof(buffer)); } } return 1; } #else static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb) { return false; } static inline int ip6mr_sk_done(struct sock *sk) { return 0; } static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { return 1; } #endif #endif
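Like many optional kernel features, this header compiles to real declarations when CONFIG_IPV6_MROUTE is set and to inline stubs returning fixed errors (-ENOPROTOOPT, -ENOIOCTLCMD, ...) when it is not, so callers never need #ifdefs of their own. A minimal userspace sketch of the same pattern follows; DEMO_CONFIG_FEATURE and demo_setsockopt() are invented for the illustration.

/* Standalone sketch of the conditional-stub pattern used above. */
#include <errno.h>
#include <stdio.h>

/* #define DEMO_CONFIG_FEATURE 1 */     /* toggle like CONFIG_IPV6_MROUTE */

#ifdef DEMO_CONFIG_FEATURE
static int demo_setsockopt(int optname)
{
        printf("handling option %d\n", optname);
        return 0;
}
#else
static int demo_setsockopt(int optname)
{
        (void)optname;
        return -ENOPROTOOPT;    /* same convention as the stubs above */
}
#endif

int main(void)
{
        printf("demo_setsockopt() = %d\n", demo_setsockopt(200));
        return 0;
}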
8 41 11 41 8 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AppArmor security module * * This file contains AppArmor policy definitions. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2017 Canonical Ltd. */ #ifndef __AA_NAMESPACE_H #define __AA_NAMESPACE_H #include <linux/kref.h> #include "apparmor.h" #include "apparmorfs.h" #include "label.h" #include "policy.h" /* struct aa_ns_acct - accounting of profiles in namespace * @max_size: maximum space allowed for all profiles in namespace * @max_count: maximum number of profiles that can be in this namespace * @size: current size of profiles * @count: current count of profiles (includes null profiles) */ struct aa_ns_acct { int max_size; int max_count; int size; int count; }; /* struct aa_ns - namespace for a set of profiles * @base: common policy * @parent: parent of namespace * @lock: lock for modifying the object * @acct: accounting for the namespace * @unconfined: special unconfined profile for the namespace * @sub_ns: list of namespaces under the current namespace. * @uniq_null: uniq value used for null learning profiles * @uniq_id: a unique id count for the profiles in the namespace * @level: level of ns within the tree hierarchy * @dents: dentries for the namespaces file entries in apparmorfs * * An aa_ns defines the set profiles that are searched to determine which * profile to attach to a task. Profiles can not be shared between aa_ns * and profile names within a namespace are guaranteed to be unique. When * profiles in separate namespaces have the same name they are NOT considered * to be equivalent. * * Namespaces are hierarchical and only namespaces and profiles below the * current namespace are visible. 
* * Namespace names must be unique and can not contain the characters :/\0 */ struct aa_ns { struct aa_policy base; struct aa_ns *parent; struct mutex lock; struct aa_ns_acct acct; struct aa_profile *unconfined; struct list_head sub_ns; atomic_t uniq_null; long uniq_id; int level; long revision; wait_queue_head_t wait; struct aa_labelset labels; struct list_head rawdata_list; struct dentry *dents[AAFS_NS_SIZEOF]; }; extern struct aa_label *kernel_t; extern struct aa_ns *root_ns; extern const char *aa_hidden_ns_name; #define ns_unconfined(NS) (&(NS)->unconfined->label) bool aa_ns_visible(struct aa_ns *curr, struct aa_ns *view, bool subns); const char *aa_ns_name(struct aa_ns *parent, struct aa_ns *child, bool subns); void aa_free_ns(struct aa_ns *ns); int aa_alloc_root_ns(void); void aa_free_root_ns(void); struct aa_ns *__aa_lookupn_ns(struct aa_ns *view, const char *hname, size_t n); struct aa_ns *aa_lookupn_ns(struct aa_ns *view, const char *name, size_t n); struct aa_ns *__aa_find_or_create_ns(struct aa_ns *parent, const char *name, struct dentry *dir); struct aa_ns *aa_prepare_ns(struct aa_ns *root, const char *name); void __aa_remove_ns(struct aa_ns *ns); static inline struct aa_profile *aa_deref_parent(struct aa_profile *p) { return rcu_dereference_protected(p->parent, mutex_is_locked(&p->ns->lock)); } /** * aa_get_ns - increment references count on @ns * @ns: namespace to increment reference count of (MAYBE NULL) * * Returns: pointer to @ns, if @ns is NULL returns NULL * Requires: @ns must be held with valid refcount when called */ static inline struct aa_ns *aa_get_ns(struct aa_ns *ns) { if (ns) aa_get_profile(ns->unconfined); return ns; } /** * aa_put_ns - decrement refcount on @ns * @ns: namespace to put reference of * * Decrement reference count of @ns and if no longer in use free it */ static inline void aa_put_ns(struct aa_ns *ns) { if (ns) aa_put_profile(ns->unconfined); } /** * __aa_findn_ns - find a namespace on a list by @name * @head: list to search for namespace on (NOT NULL) * @name: name of namespace to look for (NOT NULL) * @n: length of @name * Returns: unrefcounted namespace * * Requires: rcu_read_lock be held */ static inline struct aa_ns *__aa_findn_ns(struct list_head *head, const char *name, size_t n) { return (struct aa_ns *)__policy_strn_find(head, name, n); } static inline struct aa_ns *__aa_find_ns(struct list_head *head, const char *name) { return __aa_findn_ns(head, name, strlen(name)); } #endif /* AA_NAMESPACE_H */
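aa_get_ns() and aa_put_ns() above show a common trick: the namespace object has no reference count of its own, it borrows the lifetime of its ->unconfined profile by forwarding get/put to aa_get_profile()/aa_put_profile(). The standalone model below only illustrates that forwarding; the demo_* structures are hypothetical, and the real profile reference count is an atomic refcount rather than a plain int.

/* Standalone model of the aa_get_ns()/aa_put_ns() forwarding pattern. */
#include <stdio.h>

struct demo_profile {
        int refcount;
};

struct demo_ns {
        struct demo_profile *unconfined;
};

static struct demo_ns *demo_get_ns(struct demo_ns *ns)
{
        if (ns)
                ns->unconfined->refcount++;     /* aa_get_profile(ns->unconfined) */
        return ns;
}

static void demo_put_ns(struct demo_ns *ns)
{
        if (ns)
                ns->unconfined->refcount--;     /* aa_put_profile(ns->unconfined) */
}

int main(void)
{
        struct demo_profile unconfined = { .refcount = 1 };
        struct demo_ns ns = { .unconfined = &unconfined };

        demo_get_ns(&ns);
        printf("refcount after get: %d\n", unconfined.refcount);
        demo_put_ns(&ns);
        printf("refcount after put: %d\n", unconfined.refcount);
        return 0;
}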
732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 
1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 // SPDX-License-Identifier: GPL-2.0-only /* * "splice": joining two ropes together by interweaving their strands. * * This is the "extended pipe" functionality, where a pipe is used as * an arbitrary in-memory buffer. Think of a pipe as a small kernel * buffer that you can use to transfer data from one end to the other. * * The traditional unix read/write is extended with a "splice()" operation * that transfers data buffers to or from a pipe buffer. * * Named by Larry McVoy, original implementation from Linus, extended by * Jens to support splicing to files, network, direct splicing, etc and * fixing lots of bugs. 
* * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk> * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org> * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu> * */ #include <linux/bvec.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pagemap.h> #include <linux/splice.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/export.h> #include <linux/syscalls.h> #include <linux/uio.h> #include <linux/fsnotify.h> #include <linux/security.h> #include <linux/gfp.h> #include <linux/net.h> #include <linux/socket.h> #include <linux/sched/signal.h> #include "internal.h" /* * Splice doesn't support FMODE_NOWAIT. Since pipes may set this flag to * indicate they support non-blocking reads or writes, we must clear it * here if set to avoid blocking other users of this pipe if splice is * being done on it. */ static noinline void noinline pipe_clear_nowait(struct file *file) { fmode_t fmode = READ_ONCE(file->f_mode); do { if (!(fmode & FMODE_NOWAIT)) break; } while (!try_cmpxchg(&file->f_mode, &fmode, fmode & ~FMODE_NOWAIT)); } /* * Attempt to steal a page from a pipe buffer. This should perhaps go into * a vm helper function, it's already simplified quite a bit by the * addition of remove_mapping(). If success is returned, the caller may * attempt to reuse this page for another destination. */ static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct folio *folio = page_folio(buf->page); struct address_space *mapping; folio_lock(folio); mapping = folio_mapping(folio); if (mapping) { WARN_ON(!folio_test_uptodate(folio)); /* * At least for ext2 with nobh option, we need to wait on * writeback completing on this folio, since we'll remove it * from the pagecache. Otherwise truncate wont wait on the * folio, allowing the disk blocks to be reused by someone else * before we actually wrote our data to them. fs corruption * ensues. */ folio_wait_writeback(folio); if (!filemap_release_folio(folio, GFP_KERNEL)) goto out_unlock; /* * If we succeeded in removing the mapping, set LRU flag * and return good. */ if (remove_mapping(mapping, folio)) { buf->flags |= PIPE_BUF_FLAG_LRU; return true; } } /* * Raced with truncate or failed to remove folio from current * address space, unlock and return failure. */ out_unlock: folio_unlock(folio); return false; } static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { put_page(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } /* * Check whether the contents of buf is OK to access. Since the content * is a page cache page, IO may be in flight. */ static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct folio *folio = page_folio(buf->page); int err; if (!folio_test_uptodate(folio)) { folio_lock(folio); /* * Folio got truncated/unhashed. This will cause a 0-byte * splice, if this is the first page. */ if (!folio->mapping) { err = -ENODATA; goto error; } /* * Uh oh, read-error from disk. 
*/ if (!folio_test_uptodate(folio)) { err = -EIO; goto error; } /* Folio is ok after all, we are done */ folio_unlock(folio); } return 0; error: folio_unlock(folio); return err; } const struct pipe_buf_operations page_cache_pipe_buf_ops = { .confirm = page_cache_pipe_buf_confirm, .release = page_cache_pipe_buf_release, .try_steal = page_cache_pipe_buf_try_steal, .get = generic_pipe_buf_get, }; static bool user_page_pipe_buf_try_steal(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { if (!(buf->flags & PIPE_BUF_FLAG_GIFT)) return false; buf->flags |= PIPE_BUF_FLAG_LRU; return generic_pipe_buf_try_steal(pipe, buf); } static const struct pipe_buf_operations user_page_pipe_buf_ops = { .release = page_cache_pipe_buf_release, .try_steal = user_page_pipe_buf_try_steal, .get = generic_pipe_buf_get, }; static void wakeup_pipe_readers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->rd_wait)) wake_up_interruptible(&pipe->rd_wait); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } /** * splice_to_pipe - fill passed data into a pipe * @pipe: pipe to fill * @spd: data to fill * * Description: * @spd contains a map of pages and len/offset tuples, along with * the struct pipe_buf_operations associated with these pages. This * function will link that data to the pipe. * */ ssize_t splice_to_pipe(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int spd_pages = spd->nr_pages; unsigned int tail = pipe->tail; unsigned int head = pipe->head; unsigned int mask = pipe->ring_size - 1; ssize_t ret = 0; int page_nr = 0; if (!spd_pages) return 0; if (unlikely(!pipe->readers)) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; goto out; } while (!pipe_full(head, tail, pipe->max_usage)) { struct pipe_buffer *buf = &pipe->bufs[head & mask]; buf->page = spd->pages[page_nr]; buf->offset = spd->partial[page_nr].offset; buf->len = spd->partial[page_nr].len; buf->private = spd->partial[page_nr].private; buf->ops = spd->ops; buf->flags = 0; head++; pipe->head = head; page_nr++; ret += buf->len; if (!--spd->nr_pages) break; } if (!ret) ret = -EAGAIN; out: while (page_nr < spd_pages) spd->spd_release(spd, page_nr++); return ret; } EXPORT_SYMBOL_GPL(splice_to_pipe); ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { unsigned int head = pipe->head; unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; int ret; if (unlikely(!pipe->readers)) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; } else if (pipe_full(head, tail, pipe->max_usage)) { ret = -EAGAIN; } else { pipe->bufs[head & mask] = *buf; pipe->head = head + 1; return buf->len; } pipe_buf_release(pipe, buf); return ret; } EXPORT_SYMBOL(add_to_pipe); /* * Check if we need to grow the arrays holding pages and partial page * descriptions. 
*/ int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { unsigned int max_usage = READ_ONCE(pipe->max_usage); spd->nr_pages_max = max_usage; if (max_usage <= PIPE_DEF_BUFFERS) return 0; spd->pages = kmalloc_array(max_usage, sizeof(struct page *), GFP_KERNEL); spd->partial = kmalloc_array(max_usage, sizeof(struct partial_page), GFP_KERNEL); if (spd->pages && spd->partial) return 0; kfree(spd->pages); kfree(spd->partial); return -ENOMEM; } void splice_shrink_spd(struct splice_pipe_desc *spd) { if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) return; kfree(spd->pages); kfree(spd->partial); } /** * copy_splice_read - Copy data from a file and splice the copy into a pipe * @in: The file to read from * @ppos: Pointer to the file position to read from * @pipe: The pipe to splice into * @len: The amount to splice * @flags: The SPLICE_F_* flags * * This function allocates a bunch of pages sufficient to hold the requested * amount of data (but limited by the remaining pipe capacity), passes it to * the file's ->read_iter() to read into and then splices the used pages into * the pipe. * * Return: On success, the number of bytes read will be returned and *@ppos * will be updated if appropriate; 0 will be returned if there is no more data * to be read; -EAGAIN will be returned if the pipe had no space, and some * other negative error code will be returned on error. A short read may occur * if the pipe has insufficient space, we reach the end of the data or we hit a * hole. */ ssize_t copy_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct iov_iter to; struct bio_vec *bv; struct kiocb kiocb; struct page **pages; ssize_t ret; size_t used, npages, chunk, remain, keep = 0; int i; /* Work out how much data we can actually add into the pipe */ used = pipe_occupancy(pipe->head, pipe->tail); npages = max_t(ssize_t, pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); npages = DIV_ROUND_UP(len, PAGE_SIZE); bv = kzalloc(array_size(npages, sizeof(bv[0])) + array_size(npages, sizeof(struct page *)), GFP_KERNEL); if (!bv) return -ENOMEM; pages = (struct page **)(bv + npages); npages = alloc_pages_bulk_array(GFP_USER, npages, pages); if (!npages) { kfree(bv); return -ENOMEM; } remain = len = min_t(size_t, len, npages * PAGE_SIZE); for (i = 0; i < npages; i++) { chunk = min_t(size_t, PAGE_SIZE, remain); bv[i].bv_page = pages[i]; bv[i].bv_offset = 0; bv[i].bv_len = chunk; remain -= chunk; } /* Do the I/O */ iov_iter_bvec(&to, ITER_DEST, bv, npages, len); init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; ret = in->f_op->read_iter(&kiocb, &to); if (ret > 0) { keep = DIV_ROUND_UP(ret, PAGE_SIZE); *ppos = kiocb.ki_pos; } /* * Callers of ->splice_read() expect -EAGAIN on "can't put anything in * there", rather than -EFAULT. */ if (ret == -EFAULT) ret = -EAGAIN; /* Free any pages that didn't get touched at all. */ if (keep < npages) release_pages(pages + keep, npages - keep); /* Push the remaining pages into the pipe. 
*/ remain = ret; for (i = 0; i < keep; i++) { struct pipe_buffer *buf = pipe_head_buf(pipe); chunk = min_t(size_t, remain, PAGE_SIZE); *buf = (struct pipe_buffer) { .ops = &default_pipe_buf_ops, .page = bv[i].bv_page, .offset = 0, .len = chunk, }; pipe->head++; remain -= chunk; } kfree(bv); return ret; } EXPORT_SYMBOL(copy_splice_read); const struct pipe_buf_operations default_pipe_buf_ops = { .release = generic_pipe_buf_release, .try_steal = generic_pipe_buf_try_steal, .get = generic_pipe_buf_get, }; /* Pipe buffer operations for a socket and similar. */ const struct pipe_buf_operations nosteal_pipe_buf_ops = { .release = generic_pipe_buf_release, .get = generic_pipe_buf_get, }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); static void wakeup_pipe_writers(struct pipe_inode_info *pipe) { smp_mb(); if (waitqueue_active(&pipe->wr_wait)) wake_up_interruptible(&pipe->wr_wait); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } /** * splice_from_pipe_feed - feed available data from a pipe to a file * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function loops over the pipe and calls @actor to do the * actual moving of a single struct pipe_buffer to the desired * destination. It returns when there's no more buffers left in * the pipe or if the requested number of bytes (@sd->total_len) * have been copied. It returns a positive number (one) if the * pipe needs to be filled with more data, zero if the required * number of bytes have been copied and -errno on error. * * This, together with splice_from_pipe_{begin,end,next}, may be * used to implement the functionality of __splice_from_pipe() when * locking is required around copying the pipe buffers to the * destination. */ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { unsigned int head = pipe->head; unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; int ret; while (!pipe_empty(head, tail)) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; sd->len = buf->len; if (sd->len > sd->total_len) sd->len = sd->total_len; ret = pipe_buf_confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } ret = actor(pipe, buf, sd); if (ret <= 0) return ret; buf->offset += ret; buf->len -= ret; sd->num_spliced += ret; sd->len -= ret; sd->pos += ret; sd->total_len -= ret; if (!buf->len) { pipe_buf_release(pipe, buf); tail++; pipe->tail = tail; if (pipe->files) sd->need_wakeup = true; } if (!sd->total_len) return 0; } return 1; } /* We know we have a pipe buffer, but maybe it's empty? */ static inline bool eat_empty_buffer(struct pipe_inode_info *pipe) { unsigned int tail = pipe->tail; unsigned int mask = pipe->ring_size - 1; struct pipe_buffer *buf = &pipe->bufs[tail & mask]; if (unlikely(!buf->len)) { pipe_buf_release(pipe, buf); pipe->tail = tail+1; return true; } return false; } /** * splice_from_pipe_next - wait for some data to splice from * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wait for some data and return a positive * value (one) if pipe buffers are available. It will return zero * or -errno if no more data needs to be spliced. 
*/ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) { /* * Check for signal early to make process killable when there are * always buffers available */ if (signal_pending(current)) return -ERESTARTSYS; repeat: while (pipe_empty(pipe->head, pipe->tail)) { if (!pipe->writers) return 0; if (sd->num_spliced) return 0; if (sd->flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; if (sd->need_wakeup) { wakeup_pipe_writers(pipe); sd->need_wakeup = false; } pipe_wait_readable(pipe); } if (eat_empty_buffer(pipe)) goto repeat; return 1; } /** * splice_from_pipe_begin - start splicing from pipe * @sd: information about the splice operation * * Description: * This function should be called before a loop containing * splice_from_pipe_next() and splice_from_pipe_feed() to * initialize the necessary fields of @sd. */ static void splice_from_pipe_begin(struct splice_desc *sd) { sd->num_spliced = 0; sd->need_wakeup = false; } /** * splice_from_pipe_end - finish splicing from pipe * @pipe: pipe to splice from * @sd: information about the splice operation * * Description: * This function will wake up pipe writers if necessary. It should * be called after a loop containing splice_from_pipe_next() and * splice_from_pipe_feed(). */ static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_desc *sd) { if (sd->need_wakeup) wakeup_pipe_writers(pipe); } /** * __splice_from_pipe - splice data from a pipe to given actor * @pipe: pipe to splice from * @sd: information to @actor * @actor: handler that splices the data * * Description: * This function does little more than loop over the pipe and call * @actor to do the actual moving of a single struct pipe_buffer to * the desired destination. See pipe_to_file, pipe_to_sendmsg, or * pipe_to_user. * */ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, splice_actor *actor) { int ret; splice_from_pipe_begin(sd); do { cond_resched(); ret = splice_from_pipe_next(pipe, sd); if (ret > 0) ret = splice_from_pipe_feed(pipe, sd, actor); } while (ret > 0); splice_from_pipe_end(pipe, sd); return sd->num_spliced ? sd->num_spliced : ret; } EXPORT_SYMBOL(__splice_from_pipe); /** * splice_from_pipe - splice data from a pipe to a file * @pipe: pipe to splice from * @out: file to splice to * @ppos: position in @out * @len: how many bytes to splice * @flags: splice modifier flags * @actor: handler that splices the data * * Description: * See __splice_from_pipe. This function locks the pipe inode, * otherwise it's identical to __splice_from_pipe(). * */ ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, splice_actor *actor) { ssize_t ret; struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, actor); pipe_unlock(pipe); return ret; } /** * iter_file_splice_write - splice data from a pipe to a file * @pipe: pipe info * @out: file to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will either move or copy pages (determined by @flags options) from * the given pipe inode to the given file. * This one is ->write_iter-based. 
* */ ssize_t iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct splice_desc sd = { .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, }; int nbufs = pipe->max_usage; struct bio_vec *array; ssize_t ret; if (!out->f_op->write_iter) return -EINVAL; array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL); if (unlikely(!array)) return -ENOMEM; pipe_lock(pipe); splice_from_pipe_begin(&sd); while (sd.total_len) { struct kiocb kiocb; struct iov_iter from; unsigned int head, tail, mask; size_t left; int n; ret = splice_from_pipe_next(pipe, &sd); if (ret <= 0) break; if (unlikely(nbufs < pipe->max_usage)) { kfree(array); nbufs = pipe->max_usage; array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL); if (!array) { ret = -ENOMEM; break; } } head = pipe->head; tail = pipe->tail; mask = pipe->ring_size - 1; /* build the vector */ left = sd.total_len; for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; size_t this_len = buf->len; /* zero-length bvecs are not supported, skip them */ if (!this_len) continue; this_len = min(this_len, left); ret = pipe_buf_confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; goto done; } bvec_set_page(&array[n], buf->page, this_len, buf->offset); left -= this_len; n++; } iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left); init_sync_kiocb(&kiocb, out); kiocb.ki_pos = sd.pos; ret = out->f_op->write_iter(&kiocb, &from); sd.pos = kiocb.ki_pos; if (ret <= 0) break; sd.num_spliced += ret; sd.total_len -= ret; *ppos = sd.pos; /* dismiss the fully eaten buffers, adjust the partial one */ tail = pipe->tail; while (ret) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; if (ret >= buf->len) { ret -= buf->len; buf->len = 0; pipe_buf_release(pipe, buf); tail++; pipe->tail = tail; if (pipe->files) sd.need_wakeup = true; } else { buf->offset += ret; buf->len -= ret; ret = 0; } } } done: kfree(array); splice_from_pipe_end(pipe, &sd); pipe_unlock(pipe); if (sd.num_spliced) ret = sd.num_spliced; return ret; } EXPORT_SYMBOL(iter_file_splice_write); #ifdef CONFIG_NET /** * splice_to_socket - splice data from a pipe to a socket * @pipe: pipe to splice from * @out: socket to write to * @ppos: position in @out * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * Will send @len bytes from the pipe to a network socket. No data copying * is involved. 
* */ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct socket *sock = sock_from_file(out); struct bio_vec bvec[16]; struct msghdr msg = {}; ssize_t ret = 0; size_t spliced = 0; bool need_wakeup = false; pipe_lock(pipe); while (len > 0) { unsigned int head, tail, mask, bc = 0; size_t remain = len; /* * Check for signal early to make process killable when there * are always buffers available */ ret = -ERESTARTSYS; if (signal_pending(current)) break; while (pipe_empty(pipe->head, pipe->tail)) { ret = 0; if (!pipe->writers) goto out; if (spliced) goto out; ret = -EAGAIN; if (flags & SPLICE_F_NONBLOCK) goto out; ret = -ERESTARTSYS; if (signal_pending(current)) goto out; if (need_wakeup) { wakeup_pipe_writers(pipe); need_wakeup = false; } pipe_wait_readable(pipe); } head = pipe->head; tail = pipe->tail; mask = pipe->ring_size - 1; while (!pipe_empty(head, tail)) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; size_t seg; if (!buf->len) { tail++; continue; } seg = min_t(size_t, remain, buf->len); ret = pipe_buf_confirm(pipe, buf); if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; break; } bvec_set_page(&bvec[bc++], buf->page, seg, buf->offset); remain -= seg; if (remain == 0 || bc >= ARRAY_SIZE(bvec)) break; tail++; } if (!bc) break; msg.msg_flags = MSG_SPLICE_PAGES; if (flags & SPLICE_F_MORE) msg.msg_flags |= MSG_MORE; if (remain && pipe_occupancy(pipe->head, tail) > 0) msg.msg_flags |= MSG_MORE; if (out->f_flags & O_NONBLOCK) msg.msg_flags |= MSG_DONTWAIT; iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bc, len - remain); ret = sock_sendmsg(sock, &msg); if (ret <= 0) break; spliced += ret; len -= ret; tail = pipe->tail; while (ret > 0) { struct pipe_buffer *buf = &pipe->bufs[tail & mask]; size_t seg = min_t(size_t, ret, buf->len); buf->offset += seg; buf->len -= seg; ret -= seg; if (!buf->len) { pipe_buf_release(pipe, buf); tail++; } } if (tail != pipe->tail) { pipe->tail = tail; if (pipe->files) need_wakeup = true; } } out: pipe_unlock(pipe); if (need_wakeup) wakeup_pipe_writers(pipe); return spliced ?: ret; } #endif static int warn_unsupported(struct file *file, const char *op) { pr_debug_ratelimited( "splice %s not supported for file %pD4 (pid: %d comm: %.20s)\n", op, file, current->pid, current->comm); return -EINVAL; } /* * Attempt to initiate a splice from pipe to file. */ static ssize_t do_splice_from(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { if (unlikely(!out->f_op->splice_write)) return warn_unsupported(out, "write"); return out->f_op->splice_write(pipe, out, ppos, len, flags); } /* * Indicate to the caller that there was a premature EOF when reading from the * source and the caller didn't indicate they would be sending more data after * this. */ static void do_splice_eof(struct splice_desc *sd) { if (sd->splice_eof) sd->splice_eof(sd); } /* * Callers already called rw_verify_area() on the entire range. * No need to call it for sub ranges. */ static ssize_t do_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { unsigned int p_space; if (unlikely(!(in->f_mode & FMODE_READ))) return -EBADF; if (!len) return 0; /* Don't try to read more the pipe has space for. 
*/ p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail); len = min_t(size_t, len, p_space << PAGE_SHIFT); if (unlikely(len > MAX_RW_COUNT)) len = MAX_RW_COUNT; if (unlikely(!in->f_op->splice_read)) return warn_unsupported(in, "read"); /* * O_DIRECT and DAX don't deal with the pagecache, so we allocate a * buffer, copy into it and splice that into the pipe. */ if ((in->f_flags & O_DIRECT) || IS_DAX(in->f_mapping->host)) return copy_splice_read(in, ppos, pipe, len, flags); return in->f_op->splice_read(in, ppos, pipe, len, flags); } /** * vfs_splice_read - Read data from a file and splice it into a pipe * @in: File to splice from * @ppos: Input file offset * @pipe: Pipe to splice to * @len: Number of bytes to splice * @flags: Splice modifier flags (SPLICE_F_*) * * Splice the requested amount of data from the input file to the pipe. This * is synchronous as the caller must hold the pipe lock across the entire * operation. * * If successful, it returns the amount of data spliced, 0 if it hit the EOF or * a hole and a negative error code otherwise. */ ssize_t vfs_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { ssize_t ret; ret = rw_verify_area(READ, in, ppos, len); if (unlikely(ret < 0)) return ret; return do_splice_read(in, ppos, pipe, len, flags); } EXPORT_SYMBOL_GPL(vfs_splice_read); /** * splice_direct_to_actor - splices data directly between two non-pipes * @in: file to splice from * @sd: actor information on where to splice to * @actor: handles the data splicing * * Description: * This is a special case helper to splice directly between two * points, without requiring an explicit pipe. Internally an allocated * pipe is cached in the process, and reused during the lifetime of * that process. * */ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, splice_direct_actor *actor) { struct pipe_inode_info *pipe; ssize_t ret, bytes; size_t len; int i, flags, more; /* * We require the input to be seekable, as we don't want to randomly * drop data for eg socket -> socket splicing. Use the piped splicing * for that! */ if (unlikely(!(in->f_mode & FMODE_LSEEK))) return -EINVAL; /* * neither in nor out is a pipe, setup an internal pipe attached to * 'out' and transfer the wanted data from 'in' to 'out' through that */ pipe = current->splice_pipe; if (unlikely(!pipe)) { pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; /* * We don't have an immediate reader, but we'll read the stuff * out of the pipe right after the splice_to_pipe(). So set * PIPE_READERS appropriately. */ pipe->readers = 1; current->splice_pipe = pipe; } /* * Do the splice. */ bytes = 0; len = sd->total_len; /* Don't block on output, we have to drain the direct pipe. */ flags = sd->flags; sd->flags &= ~SPLICE_F_NONBLOCK; /* * We signal MORE until we've read sufficient data to fulfill the * request and we keep signalling it if the caller set it. */ more = sd->flags & SPLICE_F_MORE; sd->flags |= SPLICE_F_MORE; WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail)); while (len) { size_t read_len; loff_t pos = sd->pos, prev_pos = pos; ret = do_splice_read(in, &pos, pipe, len, flags); if (unlikely(ret <= 0)) goto read_failure; read_len = ret; sd->total_len = read_len; /* * If we now have sufficient data to fulfill the request then * we clear SPLICE_F_MORE if it was not set initially. */ if (read_len >= len && !more) sd->flags &= ~SPLICE_F_MORE; /* * NOTE: nonblocking mode only applies to the input. 
We * must not do the output in nonblocking mode as then we * could get stuck data in the internal pipe: */ ret = actor(pipe, sd); if (unlikely(ret <= 0)) { sd->pos = prev_pos; goto out_release; } bytes += ret; len -= ret; sd->pos = pos; if (ret < read_len) { sd->pos = prev_pos + ret; goto out_release; } } done: pipe->tail = pipe->head = 0; file_accessed(in); return bytes; read_failure: /* * If the user did *not* set SPLICE_F_MORE *and* we didn't hit that * "use all of len" case that cleared SPLICE_F_MORE, *and* we did a * "->splice_in()" that returned EOF (ie zero) *and* we have sent at * least 1 byte *then* we will also do the ->splice_eof() call. */ if (ret == 0 && !more && len > 0 && bytes) do_splice_eof(sd); out_release: /* * If we did an incomplete transfer we must release * the pipe buffers in question: */ for (i = 0; i < pipe->ring_size; i++) { struct pipe_buffer *buf = &pipe->bufs[i]; if (buf->ops) pipe_buf_release(pipe, buf); } if (!bytes) bytes = ret; goto done; } EXPORT_SYMBOL(splice_direct_to_actor); static int direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { struct file *file = sd->u.file; long ret; file_start_write(file); ret = do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags); file_end_write(file); return ret; } static int splice_file_range_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) { struct file *file = sd->u.file; return do_splice_from(pipe, file, sd->opos, sd->total_len, sd->flags); } static void direct_file_splice_eof(struct splice_desc *sd) { struct file *file = sd->u.file; if (file->f_op->splice_eof) file->f_op->splice_eof(file); } static ssize_t do_splice_direct_actor(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len, unsigned int flags, splice_direct_actor *actor) { struct splice_desc sd = { .len = len, .total_len = len, .flags = flags, .pos = *ppos, .u.file = out, .splice_eof = direct_file_splice_eof, .opos = opos, }; ssize_t ret; if (unlikely(!(out->f_mode & FMODE_WRITE))) return -EBADF; if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = splice_direct_to_actor(in, &sd, actor); if (ret > 0) *ppos = sd.pos; return ret; } /** * do_splice_direct - splices data directly between two files * @in: file to splice from * @ppos: input file offset * @out: file to splice to * @opos: output file offset * @len: number of bytes to splice * @flags: splice modifier flags * * Description: * For use by do_sendfile(). splice can easily emulate sendfile, but * doing it in the application would incur an extra system call * (splice in + splice out, as compared to just sendfile()). So this helper * can splice directly through a process-private pipe. * * Callers already called rw_verify_area() on the entire range. */ ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len, unsigned int flags) { return do_splice_direct_actor(in, ppos, out, opos, len, flags, direct_splice_actor); } EXPORT_SYMBOL(do_splice_direct); /** * splice_file_range - splices data between two files for copy_file_range() * @in: file to splice from * @ppos: input file offset * @out: file to splice to * @opos: output file offset * @len: number of bytes to splice * * Description: * For use by ->copy_file_range() methods. * Like do_splice_direct(), but vfs_copy_file_range() already holds * start_file_write() on @out file. * * Callers already called rw_verify_area() on the entire range. 
*/ ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len) { lockdep_assert(file_write_started(out)); return do_splice_direct_actor(in, ppos, out, opos, min_t(size_t, len, MAX_RW_COUNT), 0, splice_file_range_actor); } EXPORT_SYMBOL(splice_file_range); static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags) { for (;;) { if (unlikely(!pipe->readers)) { send_sig(SIGPIPE, current, 0); return -EPIPE; } if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) return 0; if (flags & SPLICE_F_NONBLOCK) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; pipe_wait_writable(pipe); } } static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags); ssize_t splice_file_to_pipe(struct file *in, struct pipe_inode_info *opipe, loff_t *offset, size_t len, unsigned int flags) { ssize_t ret; pipe_lock(opipe); ret = wait_for_space(opipe, flags); if (!ret) ret = do_splice_read(in, offset, opipe, len, flags); pipe_unlock(opipe); if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * Determine where to splice to/from. */ ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out, loff_t *off_out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; loff_t offset; ssize_t ret; if (unlikely(!(in->f_mode & FMODE_READ) || !(out->f_mode & FMODE_WRITE))) return -EBADF; ipipe = get_pipe_info(in, true); opipe = get_pipe_info(out, true); if (ipipe && opipe) { if (off_in || off_out) return -ESPIPE; /* Splicing to self would be fun, but... */ if (ipipe == opipe) return -EINVAL; if ((in->f_flags | out->f_flags) & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; ret = splice_pipe_to_pipe(ipipe, opipe, len, flags); } else if (ipipe) { if (off_in) return -ESPIPE; if (off_out) { if (!(out->f_mode & FMODE_PWRITE)) return -EINVAL; offset = *off_out; } else { offset = out->f_pos; } if (unlikely(out->f_flags & O_APPEND)) return -EINVAL; ret = rw_verify_area(WRITE, out, &offset, len); if (unlikely(ret < 0)) return ret; if (in->f_flags & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; file_start_write(out); ret = do_splice_from(ipipe, out, &offset, len, flags); file_end_write(out); if (!off_out) out->f_pos = offset; else *off_out = offset; } else if (opipe) { if (off_out) return -ESPIPE; if (off_in) { if (!(in->f_mode & FMODE_PREAD)) return -EINVAL; offset = *off_in; } else { offset = in->f_pos; } ret = rw_verify_area(READ, in, &offset, len); if (unlikely(ret < 0)) return ret; if (out->f_flags & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; ret = splice_file_to_pipe(in, opipe, &offset, len, flags); if (!off_in) in->f_pos = offset; else *off_in = offset; } else { ret = -EINVAL; } if (ret > 0) { /* * Generate modify out before access in: * do_splice_from() may've already sent modify out, * and this ensures the events get merged. 
*/ fsnotify_modify(out); fsnotify_access(in); } return ret; } static ssize_t __do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe; struct pipe_inode_info *opipe; loff_t offset, *__off_in = NULL, *__off_out = NULL; ssize_t ret; ipipe = get_pipe_info(in, true); opipe = get_pipe_info(out, true); if (ipipe) { if (off_in) return -ESPIPE; pipe_clear_nowait(in); } if (opipe) { if (off_out) return -ESPIPE; pipe_clear_nowait(out); } if (off_out) { if (copy_from_user(&offset, off_out, sizeof(loff_t))) return -EFAULT; __off_out = &offset; } if (off_in) { if (copy_from_user(&offset, off_in, sizeof(loff_t))) return -EFAULT; __off_in = &offset; } ret = do_splice(in, __off_in, out, __off_out, len, flags); if (ret < 0) return ret; if (__off_out && copy_to_user(off_out, __off_out, sizeof(loff_t))) return -EFAULT; if (__off_in && copy_to_user(off_in, __off_in, sizeof(loff_t))) return -EFAULT; return ret; } static ssize_t iter_to_pipe(struct iov_iter *from, struct pipe_inode_info *pipe, unsigned int flags) { struct pipe_buffer buf = { .ops = &user_page_pipe_buf_ops, .flags = flags }; size_t total = 0; ssize_t ret = 0; while (iov_iter_count(from)) { struct page *pages[16]; ssize_t left; size_t start; int i, n; left = iov_iter_get_pages2(from, pages, ~0UL, 16, &start); if (left <= 0) { ret = left; break; } n = DIV_ROUND_UP(left + start, PAGE_SIZE); for (i = 0; i < n; i++) { int size = min_t(int, left, PAGE_SIZE - start); buf.page = pages[i]; buf.offset = start; buf.len = size; ret = add_to_pipe(pipe, &buf); if (unlikely(ret < 0)) { iov_iter_revert(from, left); // this one got dropped by add_to_pipe() while (++i < n) put_page(pages[i]); goto out; } total += ret; left -= size; start = 0; } } out: return total ? total : ret; } static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct splice_desc *sd) { int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data); return n == sd->len ? n : -EFAULT; } /* * For lack of a better implementation, implement vmsplice() to userspace * as a simple copy of the pipes pages to the user iov. */ static ssize_t vmsplice_to_user(struct file *file, struct iov_iter *iter, unsigned int flags) { struct pipe_inode_info *pipe = get_pipe_info(file, true); struct splice_desc sd = { .total_len = iov_iter_count(iter), .flags = flags, .u.data = iter }; ssize_t ret = 0; if (!pipe) return -EBADF; pipe_clear_nowait(file); if (sd.total_len) { pipe_lock(pipe); ret = __splice_from_pipe(pipe, &sd, pipe_to_user); pipe_unlock(pipe); } if (ret > 0) fsnotify_access(file); return ret; } /* * vmsplice splices a user address range into a pipe. It can be thought of * as splice-from-memory, where the regular splice is splice-from-file (or * to file). In both cases the output is a pipe, naturally. */ static ssize_t vmsplice_to_pipe(struct file *file, struct iov_iter *iter, unsigned int flags) { struct pipe_inode_info *pipe; ssize_t ret = 0; unsigned buf_flag = 0; if (flags & SPLICE_F_GIFT) buf_flag = PIPE_BUF_FLAG_GIFT; pipe = get_pipe_info(file, true); if (!pipe) return -EBADF; pipe_clear_nowait(file); pipe_lock(pipe); ret = wait_for_space(pipe, flags); if (!ret) ret = iter_to_pipe(iter, pipe, buf_flag); pipe_unlock(pipe); if (ret > 0) { wakeup_pipe_readers(pipe); fsnotify_modify(file); } return ret; } /* * Note that vmsplice only really supports true splicing _from_ user memory * to a pipe, not the other way around. 
Splicing from user memory is a simple * operation that can be supported without any funky alignment restrictions * or nasty vm tricks. We simply map in the user memory and fill them into * a pipe. The reverse isn't quite as easy, though. There are two possible * solutions for that: * * - memcpy() the data internally, at which point we might as well just * do a regular read() on the buffer anyway. * - Lots of nasty vm tricks, that are neither fast nor flexible (it * has restriction limitations on both ends of the pipe). * * Currently we punt and implement it as a normal copy, see pipe_to_user(). * */ SYSCALL_DEFINE4(vmsplice, int, fd, const struct iovec __user *, uiov, unsigned long, nr_segs, unsigned int, flags) { struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; ssize_t error; int type; if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; if (fd_file(f)->f_mode & FMODE_WRITE) type = ITER_SOURCE; else if (fd_file(f)->f_mode & FMODE_READ) type = ITER_DEST; else return -EBADF; error = import_iovec(type, uiov, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter); if (error < 0) return error; if (!iov_iter_count(&iter)) error = 0; else if (type == ITER_SOURCE) error = vmsplice_to_pipe(fd_file(f), &iter, flags); else error = vmsplice_to_user(fd_file(f), &iter, flags); kfree(iov); return error; } SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags) { if (unlikely(!len)) return 0; if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; CLASS(fd, in)(fd_in); if (fd_empty(in)) return -EBADF; CLASS(fd, out)(fd_out); if (fd_empty(out)) return -EBADF; return __do_splice(fd_file(in), off_in, fd_file(out), off_out, len, flags); } /* * Make sure there's data to read. Wait for input if we can, otherwise * return an appropriate error. */ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check the pipe occupancy without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (!pipe_empty(pipe->head, pipe->tail)) return 0; ret = 0; pipe_lock(pipe); while (pipe_empty(pipe->head, pipe->tail)) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!pipe->writers) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } pipe_wait_readable(pipe); } pipe_unlock(pipe); return ret; } /* * Make sure there's writeable room. Wait for room if we can, otherwise * return an appropriate error. */ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) { int ret; /* * Check pipe occupancy without the inode lock first. This function * is speculative anyways, so missing one is ok. */ if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) return 0; ret = 0; pipe_lock(pipe); while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; break; } if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } pipe_wait_writable(pipe); } pipe_unlock(pipe); return ret; } /* * Splice contents of ipipe to opipe. 
*/ static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; unsigned int i_head, o_head; unsigned int i_tail, o_tail; unsigned int i_mask, o_mask; int ret = 0; bool input_wakeup = false; retry: ret = ipipe_prep(ipipe, flags); if (ret) return ret; ret = opipe_prep(opipe, flags); if (ret) return ret; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); i_tail = ipipe->tail; i_mask = ipipe->ring_size - 1; o_head = opipe->head; o_mask = opipe->ring_size - 1; do { size_t o_len; if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } i_head = ipipe->head; o_tail = opipe->tail; if (pipe_empty(i_head, i_tail) && !ipipe->writers) break; /* * Cannot make any progress, because either the input * pipe is empty or the output pipe is full. */ if (pipe_empty(i_head, i_tail) || pipe_full(o_head, o_tail, opipe->max_usage)) { /* Already processed some buffers, break */ if (ret) break; if (flags & SPLICE_F_NONBLOCK) { ret = -EAGAIN; break; } /* * We raced with another reader/writer and haven't * managed to process any buffers. A zero return * value means EOF, so retry instead. */ pipe_unlock(ipipe); pipe_unlock(opipe); goto retry; } ibuf = &ipipe->bufs[i_tail & i_mask]; obuf = &opipe->bufs[o_head & o_mask]; if (len >= ibuf->len) { /* * Simply move the whole buffer from ipipe to opipe */ *obuf = *ibuf; ibuf->ops = NULL; i_tail++; ipipe->tail = i_tail; input_wakeup = true; o_len = obuf->len; o_head++; opipe->head = o_head; } else { /* * Get a reference to this pipe buffer, * so we can copy the contents over. */ if (!pipe_buf_get(ipipe, ibuf)) { if (ret == 0) ret = -EFAULT; break; } *obuf = *ibuf; /* * Don't inherit the gift and merge flags, we need to * prevent multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE; obuf->len = len; ibuf->offset += len; ibuf->len -= len; o_len = len; o_head++; opipe->head = o_head; } ret += o_len; len -= o_len; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); if (input_wakeup) wakeup_pipe_writers(ipipe); return ret; } /* * Link contents of ipipe to opipe. */ static ssize_t link_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags) { struct pipe_buffer *ibuf, *obuf; unsigned int i_head, o_head; unsigned int i_tail, o_tail; unsigned int i_mask, o_mask; ssize_t ret = 0; /* * Potential ABBA deadlock, work around it by ordering lock * grabbing by pipe info address. Otherwise two different processes * could deadlock (one doing tee from A -> B, the other from B -> A). */ pipe_double_lock(ipipe, opipe); i_tail = ipipe->tail; i_mask = ipipe->ring_size - 1; o_head = opipe->head; o_mask = opipe->ring_size - 1; do { if (!opipe->readers) { send_sig(SIGPIPE, current, 0); if (!ret) ret = -EPIPE; break; } i_head = ipipe->head; o_tail = opipe->tail; /* * If we have iterated all input buffers or run out of * output room, break. */ if (pipe_empty(i_head, i_tail) || pipe_full(o_head, o_tail, opipe->max_usage)) break; ibuf = &ipipe->bufs[i_tail & i_mask]; obuf = &opipe->bufs[o_head & o_mask]; /* * Get a reference to this pipe buffer, * so we can copy the contents over. 
*/ if (!pipe_buf_get(ipipe, ibuf)) { if (ret == 0) ret = -EFAULT; break; } *obuf = *ibuf; /* * Don't inherit the gift and merge flag, we need to prevent * multiple steals of this page. */ obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE; if (obuf->len > len) obuf->len = len; ret += obuf->len; len -= obuf->len; o_head++; opipe->head = o_head; i_tail++; } while (len); pipe_unlock(ipipe); pipe_unlock(opipe); /* * If we put data in the output pipe, wakeup any potential readers. */ if (ret > 0) wakeup_pipe_readers(opipe); return ret; } /* * This is a tee(1) implementation that works on pipes. It doesn't copy * any data, it simply references the 'in' pages on the 'out' pipe. * The 'flags' used are the SPLICE_F_* variants, currently the only * applicable one is SPLICE_F_NONBLOCK. */ ssize_t do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { struct pipe_inode_info *ipipe = get_pipe_info(in, true); struct pipe_inode_info *opipe = get_pipe_info(out, true); ssize_t ret = -EINVAL; if (unlikely(!(in->f_mode & FMODE_READ) || !(out->f_mode & FMODE_WRITE))) return -EBADF; /* * Duplicate the contents of ipipe to opipe without actually * copying the data. */ if (ipipe && opipe && ipipe != opipe) { if ((in->f_flags | out->f_flags) & O_NONBLOCK) flags |= SPLICE_F_NONBLOCK; /* * Keep going, unless we encounter an error. The ipipe/opipe * ordering doesn't really matter. */ ret = ipipe_prep(ipipe, flags); if (!ret) { ret = opipe_prep(opipe, flags); if (!ret) ret = link_pipe(ipipe, opipe, len, flags); } } if (ret > 0) { fsnotify_access(in); fsnotify_modify(out); } return ret; } SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags) { if (unlikely(flags & ~SPLICE_F_ALL)) return -EINVAL; if (unlikely(!len)) return 0; CLASS(fd, in)(fdin); if (fd_empty(in)) return -EBADF; CLASS(fd, out)(fdout); if (fd_empty(out)) return -EBADF; return do_tee(fd_file(in), fd_file(out), len, flags); }
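The pipe-backed data path implemented above is driven from userspace through the splice(2), tee(2) and vmsplice(2) system calls. As a minimal sketch of how that path is exercised (this program is not part of the kernel sources shown here; the 64 KiB chunk size and the command-line arguments are arbitrary choices), the following copies one file to another through an anonymous pipe, so the data is moved between page-cache pages without passing through a userspace buffer:

/*
 * Minimal userspace sketch: copy <src> to <dst> through a pipe using
 * splice(2). Each splice() call below lands in do_splice() above: the
 * first takes the "opipe" branch (file -> pipe), the second the
 * "ipipe" branch (pipe -> file). Error handling is kept minimal.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int in, out, pfd[2];
	ssize_t n;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return 1;
	}
	in = open(argv[1], O_RDONLY);
	out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0 || pipe(pfd) < 0) {
		perror("setup");
		return 1;
	}
	for (;;) {
		/* file -> pipe: at most 64 KiB per round trip */
		n = splice(in, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
		if (n < 0) {
			perror("splice in");
			return 1;
		}
		if (n == 0)	/* EOF on the input file */
			break;
		/* pipe -> file: drain exactly what was queued above */
		while (n > 0) {
			ssize_t m = splice(pfd[0], NULL, out, NULL, n,
					   SPLICE_F_MOVE);
			if (m <= 0) {
				perror("splice out");
				return 1;
			}
			n -= m;
		}
	}
	return 0;
}

do_splice_direct() above exists precisely so that in-kernel users such as sendfile(2) and copy_file_range(2) can get the same effect without the explicit pipe and the second system call, by reusing a per-process internal pipe.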
drivers/acpi/device_sysfs.c

// SPDX-License-Identifier: GPL-2.0-only /* * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias. * * Copyright (C) 2015, Intel Corp. * Author: Mika Westerberg <mika.westerberg@linux.intel.com> * Author: Rafael J.
Wysocki <rafael.j.wysocki@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/export.h> #include <linux/nls.h> #include "internal.h" static ssize_t acpi_object_path(acpi_handle handle, char *buf) { struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; int result; result = acpi_get_name(handle, ACPI_FULL_PATHNAME, &path); if (result) return result; result = sprintf(buf, "%s\n", (char *)path.pointer); kfree(path.pointer); return result; } struct acpi_data_node_attr { struct attribute attr; ssize_t (*show)(struct acpi_data_node *, char *); ssize_t (*store)(struct acpi_data_node *, const char *, size_t count); }; #define DATA_NODE_ATTR(_name) \ static struct acpi_data_node_attr data_node_##_name = \ __ATTR(_name, 0444, data_node_show_##_name, NULL) static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf) { return dn->handle ? acpi_object_path(dn->handle, buf) : 0; } DATA_NODE_ATTR(path); static struct attribute *acpi_data_node_default_attrs[] = { &data_node_path.attr, NULL }; ATTRIBUTE_GROUPS(acpi_data_node_default); #define to_data_node(k) container_of(k, struct acpi_data_node, kobj) #define to_attr(a) container_of(a, struct acpi_data_node_attr, attr) static ssize_t acpi_data_node_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct acpi_data_node *dn = to_data_node(kobj); struct acpi_data_node_attr *dn_attr = to_attr(attr); return dn_attr->show ? dn_attr->show(dn, buf) : -ENXIO; } static const struct sysfs_ops acpi_data_node_sysfs_ops = { .show = acpi_data_node_attr_show, }; static void acpi_data_node_release(struct kobject *kobj) { struct acpi_data_node *dn = to_data_node(kobj); complete(&dn->kobj_done); } static const struct kobj_type acpi_data_node_ktype = { .sysfs_ops = &acpi_data_node_sysfs_ops, .default_groups = acpi_data_node_default_groups, .release = acpi_data_node_release, }; static void acpi_expose_nondev_subnodes(struct kobject *kobj, struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry(dn, list, sibling) { int ret; init_completion(&dn->kobj_done); ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype, kobj, "%s", dn->name); if (!ret) acpi_expose_nondev_subnodes(&dn->kobj, &dn->data); else if (dn->handle) acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret); } } static void acpi_hide_nondev_subnodes(struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry_reverse(dn, list, sibling) { acpi_hide_nondev_subnodes(&dn->data); kobject_put(&dn->kobj); } } /** * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Creates hid/cid(s) string needed for modalias and uevent * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: * char *modalias: "acpi:IBM0001:ACPI0001" * Return: 0: no _HID and no _CID * -EINVAL: output error * -ENOMEM: output is truncated */ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { int len; int count; struct acpi_hardware_id *id; /* Avoid unnecessarily loading modules for non present devices. 
*/ if (!acpi_device_is_present(acpi_dev)) return 0; /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the * device's list. */ count = 0; list_for_each_entry(id, &acpi_dev->pnp.ids, list) if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) count++; if (!count) return 0; len = snprintf(modalias, size, "acpi:"); if (len >= size) return -ENOMEM; size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) continue; count = snprintf(&modalias[len], size, "%s:", id->id); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } /** * create_of_modalias - Creates DT compatible string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Expose DT compatible modalias as of:NnameTCcompatible. This function should * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of * ACPI/PNP IDs. */ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; const union acpi_object *of_compatible, *obj; acpi_status status; int len, count; int i, nval; char *c; status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); if (ACPI_FAILURE(status)) return -ENODEV; /* DT strings are all in lower case */ for (c = buf.pointer; *c != '\0'; c++) *c = tolower(*c); len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); if (len >= size) return -ENOMEM; size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { nval = of_compatible->package.count; obj = of_compatible->package.elements; } else { /* Must be ACPI_TYPE_STRING. */ nval = 1; obj = of_compatible; } for (i = 0; i < nval; i++, obj++) { count = snprintf(&modalias[len], size, "C%s", obj->string.pointer); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } int __acpi_device_uevent_modalias(const struct acpi_device *adev, struct kobj_uevent_env *env) { int len; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; if (adev->data.of_compatible) len = create_of_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); else len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); if (len < 0) return len; env->buflen += len; return 0; } /** * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @env: Environment variables of the kobject uevent. * * Create the uevent modalias field for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
*/ int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); } EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size) { int len, count; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; len = create_pnp_modalias(adev, buf, size - 1); if (len < 0) { return len; } else if (len > 0) { buf[len++] = '\n'; size -= len; } if (!adev->data.of_compatible) return len; count = create_of_modalias(adev, buf + len, size - 1); if (count < 0) { return count; } else if (count > 0) { len += count; buf[len++] = '\n'; } return len; } /** * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @buf: The buffer to save pnp_modalias and of_modalias. * @size: Size of buffer. * * Create the modalias sysfs attribute for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". */ int acpi_device_modalias(struct device *dev, char *buf, int size) { return __acpi_device_modalias(acpi_companion_match(dev), buf, size); } EXPORT_SYMBOL_GPL(acpi_device_modalias); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); } static DEVICE_ATTR_RO(modalias); static ssize_t real_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); int state; int ret; ret = acpi_device_get_power(adev, &state); if (ret) return ret; return sprintf(buf, "%s\n", acpi_power_state_string(state)); } static DEVICE_ATTR_RO(real_power_state); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); } static DEVICE_ATTR_RO(power_state); static ssize_t eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi_device = to_acpi_device(d); acpi_object_type not_used; acpi_status status; if (!count || buf[0] != '1') return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) && !d->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, &not_used); if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) return -ENODEV; acpi_dev_get(acpi_device); status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); if (ACPI_SUCCESS(status)) return count; acpi_dev_put(acpi_device); acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); return status == AE_NO_MEMORY ? 
-ENOMEM : -EAGAIN; } static DEVICE_ATTR_WO(eject); static ssize_t hid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); } static DEVICE_ATTR_RO(hid); static ssize_t uid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_uid(acpi_dev)); } static DEVICE_ATTR_RO(uid); static ssize_t adr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); if (acpi_dev->pnp.bus_address > U32_MAX) return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address); else return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address); } static DEVICE_ATTR_RO(adr); static ssize_t path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return acpi_object_path(acpi_dev->handle, buf); } static DEVICE_ATTR_RO(path); /* sysfs file that shows description text from the ACPI _STR method */ static ssize_t description_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *str_obj; acpi_status status; int result; status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR", NULL, &buffer, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return -EIO; str_obj = buffer.pointer; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. */ result = utf16s_to_utf8s( (wchar_t *)str_obj->buffer.pointer, str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[result++] = '\n'; kfree(str_obj); return result; } static DEVICE_ATTR_RO(description); static ssize_t sun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sun; status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sun); } static DEVICE_ATTR_RO(sun); static ssize_t hrv_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long hrv; status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", hrv); } static DEVICE_ATTR_RO(hrv); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sta; status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sta); } static DEVICE_ATTR_RO(status); static struct attribute *acpi_attrs[] = { &dev_attr_path.attr, &dev_attr_hid.attr, &dev_attr_modalias.attr, &dev_attr_description.attr, &dev_attr_adr.attr, &dev_attr_uid.attr, &dev_attr_sun.attr, &dev_attr_hrv.attr, &dev_attr_status.attr, &dev_attr_eject.attr, &dev_attr_power_state.attr, &dev_attr_real_power_state.attr, NULL }; static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr) { /* * Devices gotten from FADT don't have a "path" attribute */ if (attr == &dev_attr_path) return dev->handle; if (attr == &dev_attr_hid || attr == 
&dev_attr_modalias) return !list_empty(&dev->pnp.ids); if (attr == &dev_attr_description) return acpi_has_method(dev->handle, "_STR"); if (attr == &dev_attr_adr) return dev->pnp.type.bus_address; if (attr == &dev_attr_uid) return acpi_device_uid(dev); if (attr == &dev_attr_sun) return acpi_has_method(dev->handle, "_SUN"); if (attr == &dev_attr_hrv) return acpi_has_method(dev->handle, "_HRV"); if (attr == &dev_attr_status) return acpi_has_method(dev->handle, "_STA"); /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. */ if (attr == &dev_attr_eject) return acpi_has_method(dev->handle, "_EJ0"); if (attr == &dev_attr_power_state) return dev->flags.power_manageable; if (attr == &dev_attr_real_power_state) return dev->flags.power_manageable && dev->power.flags.power_resources; dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name); return false; } static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj)); if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr))) return attr->mode; else return 0; } static const struct attribute_group acpi_group = { .attrs = acpi_attrs, .is_visible = acpi_attr_is_visible, }; const struct attribute_group *acpi_groups[] = { &acpi_group, NULL }; /** * acpi_device_setup_files - Create sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_setup_files(struct acpi_device *dev) { acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); } /** * acpi_device_remove_files - Remove sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_remove_files(struct acpi_device *dev) { acpi_hide_nondev_subnodes(&dev->data); }
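The create_pnp_modalias() and create_of_modalias() helpers above both rely on the same append-and-check idiom: snprintf() returns the length it would have written, so a return value greater than or equal to the remaining space means the output was truncated and the helper bails out with -ENOMEM. Below is a minimal user-space sketch of that idiom only; the build_acpi_modalias() name and the IBM0001/ACPI0001 IDs are illustrative values taken from the comment above, not real driver code.

#include <errno.h>
#include <stdio.h>

/* Append "acpi:<id>:" components, failing on truncation the same way
 * create_pnp_modalias() does.  The ID list here is illustrative only. */
static int build_acpi_modalias(char *buf, int size, const char *const *ids, int nids)
{
	int len, count, i;

	len = snprintf(buf, size, "acpi:");
	if (len >= size)
		return -ENOMEM;
	size -= len;

	for (i = 0; i < nids; i++) {
		count = snprintf(&buf[len], size, "%s:", ids[i]);
		if (count >= size)
			return -ENOMEM;	/* output would be truncated */
		len += count;
		size -= count;
	}
	return len;
}

int main(void)
{
	const char *ids[] = { "IBM0001", "ACPI0001" };
	char modalias[64];

	if (build_acpi_modalias(modalias, sizeof(modalias), ids, 2) > 0)
		printf("%s\n", modalias);	/* prints "acpi:IBM0001:ACPI0001:" */
	return 0;
}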
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for INET connection oriented protocols.
 *
 *		Definitions for inet_connection_sock
 *
 * Authors:	Many people, see the TCP sources
 *
 *		From code originally in TCP
 */
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sockptr.h>

#include <net/inet_sock.h>
#include <net/request_sock.h>

/* Cancel timers, when they are not required. */
#undef INET_CSK_CLEAR_TIMERS

struct inet_bind_bucket;
struct inet_bind2_bucket;
struct tcp_congestion_ops;

/*
 * Pointers to address related TCP functions
 * (i.e.
things that depend on the address family) */ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl); void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req); u16 net_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*mtu_reduced)(struct sock *sk); }; /** inet_connection_sock - INET connection oriented sock * * @icsk_accept_queue: FIFO of established children * @icsk_bind_hash: Bind node * @icsk_bind2_hash: Bind node in the bhash2 table * @icsk_timeout: Timeout * @icsk_retransmit_timer: Resend (no ack) * @icsk_rto: Retransmit timeout * @icsk_pmtu_cookie Last pmtu seen by socket * @icsk_ca_ops Pluggable congestion control hook * @icsk_af_ops Operations which are AF_INET{4,6} specific * @icsk_ulp_ops Pluggable ULP control hook * @icsk_ulp_data ULP private data * @icsk_clean_acked Clean acked data hook * @icsk_ca_state: Congestion control state * @icsk_retransmits: Number of unrecovered [RTO] timeouts * @icsk_pending: Scheduled timer event * @icsk_backoff: Backoff * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries * @icsk_probes_out: unanswered 0 window probes * @icsk_ext_hdr_len: Network protocol overhead (IP/IPv6 options) * @icsk_ack: Delayed ACK control data * @icsk_mtup; MTU probing control data * @icsk_probes_tstamp: Probe timestamp (cleared by non-zero window ack) * @icsk_user_timeout: TCP_USER_TIMEOUT value */ struct inet_connection_sock { /* inet_sock has to be the first member! 
*/ struct inet_sock icsk_inet; struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; struct inet_bind2_bucket *icsk_bind2_hash; unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; __u32 icsk_rto; __u32 icsk_rto_min; __u32 icsk_delack_max; __u32 icsk_pmtu_cookie; const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void __rcu *icsk_ulp_data; void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq); unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); __u8 icsk_ca_state:5, icsk_ca_initialized:1, icsk_ca_setsockopt:1, icsk_ca_dst_locked:1; __u8 icsk_retransmits; __u8 icsk_pending; __u8 icsk_backoff; __u8 icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; struct { __u8 pending; /* ACK is pending */ __u8 quick; /* Scheduled number of quick acks */ __u8 pingpong; /* The session is interactive */ __u8 retry; /* Number of attempts */ #define ATO_BITS 8 __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ lrcv_flowlabel:20, /* last received ipv6 flowlabel */ unused:4; unsigned long timeout; /* Currently scheduled timeout */ __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ __u16 rcv_mss; /* MSS used for delayed ACK decisions */ } icsk_ack; struct { /* Range of MTUs to search */ int search_high; int search_low; /* Information on the current probe. */ u32 probe_size:31, /* Is the MTUP feature enabled for this connection? */ enabled:1; u32 probe_timestamp; } icsk_mtup; u32 icsk_probes_tstamp; u32 icsk_user_timeout; u64 icsk_ca_priv[104 / sizeof(u64)]; #define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ #define ICSK_TIME_DACK 2 /* Delayed ack timer */ #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ #define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */ #define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */ #define inet_csk(ptr) container_of_const(ptr, struct inet_connection_sock, icsk_inet.sk) static inline void *inet_csk_ca(const struct sock *sk) { return (void *)inet_csk(sk)->icsk_ca_priv; } struct sock *inet_csk_clone_lock(const struct sock *sk, const struct request_sock *req, const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, ICSK_ACK_TIMER = 2, ICSK_ACK_PUSHED = 4, ICSK_ACK_PUSHED2 = 8, ICSK_ACK_NOW = 16, /* Send the next ACK immediately (once) */ ICSK_ACK_NOMEM = 32, }; void inet_csk_init_xmit_timers(struct sock *sk, void (*retransmit_handler)(struct timer_list *), void (*delack_handler)(struct timer_list *), void (*keepalive_handler)(struct timer_list *)); void inet_csk_clear_xmit_timers(struct sock *sk); void inet_csk_clear_xmit_timers_sync(struct sock *sk); static inline void inet_csk_schedule_ack(struct sock *sk) { inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; } static inline int inet_csk_ack_scheduled(const struct sock *sk) { return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; } static inline void inet_csk_delack_init(struct sock *sk) { memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); } void inet_csk_delete_keepalive_timer(struct sock *sk); void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) { struct inet_connection_sock *icsk = inet_csk(sk); if (what == ICSK_TIME_RETRANS || what == 
ICSK_TIME_PROBE0) { smp_store_release(&icsk->icsk_pending, 0); #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_retransmit_timer); #endif } else if (what == ICSK_TIME_DACK) { smp_store_release(&icsk->icsk_ack.pending, 0); icsk->icsk_ack.retry = 0; #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_delack_timer); #endif } else { pr_debug("inet_csk BUG: unknown timer value\n"); } } /* * Reset the retransmission timer */ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, const unsigned long max_when) { struct inet_connection_sock *icsk = inet_csk(sk); if (when > max_when) { pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, (void *)_THIS_IP_); when = max_when; } if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { smp_store_release(&icsk->icsk_pending, what); icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); } else if (what == ICSK_TIME_DACK) { smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); } else { pr_debug("inet_csk BUG: unknown timer value\n"); } } static inline unsigned long inet_csk_rto_backoff(const struct inet_connection_sock *icsk, unsigned long max_when) { u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; return (unsigned long)min_t(u64, when, max_when); } struct sock *inet_csk_accept(struct sock *sk, struct proto_accept_arg *arg); int inet_csk_get_port(struct sock *sk, unsigned short snum); struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, const struct request_sock *req); struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, struct sock *newsk, const struct request_sock *req); struct sock *inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req, struct sock *child); bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, struct request_sock *req, bool own_req); static inline void inet_csk_reqsk_queue_added(struct sock *sk) { reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_len(const struct sock *sk) { return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); } static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) { return inet_csk_reqsk_queue_len(sk) >= READ_ONCE(sk->sk_max_ack_backlog); } bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); static inline unsigned long reqsk_timeout(struct request_sock *req, unsigned long max_timeout) { u64 timeout = (u64)req->timeout << req->num_timeout; return (unsigned long)min_t(u64, timeout, max_timeout); } static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) { /* The below has to be done to allow calling inet_csk_destroy_sock */ sock_set_flag(sk, SOCK_DEAD); this_cpu_inc(*sk->sk_prot->orphan_count); } void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); /* * LISTEN is a special case for poll.. */ static inline __poll_t inet_csk_listen_poll(const struct sock *sk) { return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? 
(EPOLLIN | EPOLLRDNORM) : 0; } int inet_csk_listen_start(struct sock *sk); void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); /* update the fast reuse flag when adding a socket */ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct sock *sk); struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); static inline void inet_csk_enter_pingpong_mode(struct sock *sk) { inet_csk(sk)->icsk_ack.pingpong = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_exit_pingpong_mode(struct sock *sk) { inet_csk(sk)->icsk_ack.pingpong = 0; } static inline bool inet_csk_in_pingpong_mode(struct sock *sk) { return inet_csk(sk)->icsk_ack.pingpong >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh); } static inline void inet_csk_inc_pingpong_cnt(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ack.pingpong < U8_MAX) icsk->icsk_ack.pingpong++; } static inline bool inet_csk_has_ulp(const struct sock *sk) { return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; } static inline void inet_init_csk_locks(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); } #endif /* _INET_CONNECTION_SOCK_H */
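inet_csk_rto_backoff() above is an exponential backoff with a cap: the base RTO is left-shifted by the current backoff count in 64-bit arithmetic (so repeated doubling cannot overflow), then clamped to max_when. A stand-alone sketch of the same shift-and-clamp follows; the rto and max_when numbers are made-up example values, not kernel defaults.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shift-and-clamp in inet_csk_rto_backoff(). */
static unsigned long rto_backoff(uint32_t rto, uint8_t backoff, unsigned long max_when)
{
	uint64_t when = (uint64_t)rto << backoff;	/* doubles per retransmit */

	return when > max_when ? max_when : (unsigned long)when;
}

int main(void)
{
	unsigned long max_when = 60 * 1000;	/* pretend 60s worth of ticks */
	uint8_t backoff;

	for (backoff = 0; backoff < 10; backoff++)
		printf("backoff %u -> %lu\n", backoff,
		       rto_backoff(200, backoff, max_when));	/* 200, 400, ..., clamped */
	return 0;
}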
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */
#ifndef _IP6_FIB_H
#define _IP6_FIB_H

#include <linux/ipv6_route.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/inetpeer.h>
#include <net/fib_notifier.h>
#include <linux/indirect_call_wrapper.h>
#include <uapi/linux/bpf.h>

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
#define FIB6_TABLE_HASHSZ 256
#else
#define FIB6_TABLE_HASHSZ 1
#endif

#define RT6_DEBUG 2

struct rt6_info;
struct fib6_info;

struct fib6_config {
	u32		fc_table;
	u32		fc_metric;
	int		fc_dst_len;
	int		fc_src_len;
	int		fc_ifindex;
	u32		fc_flags;
	u32		fc_protocol;
	u16		fc_type;	/* only 8 bits are used */
	u16
fc_delete_all_nh : 1, fc_ignore_dev_down:1, __unused : 14; u32 fc_nh_id; struct in6_addr fc_dst; struct in6_addr fc_src; struct in6_addr fc_prefsrc; struct in6_addr fc_gateway; unsigned long fc_expires; struct nlattr *fc_mx; int fc_mx_len; int fc_mp_len; struct nlattr *fc_mp; struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; bool fc_is_fdb; }; struct fib6_node { struct fib6_node __rcu *parent; struct fib6_node __rcu *left; struct fib6_node __rcu *right; #ifdef CONFIG_IPV6_SUBTREES struct fib6_node __rcu *subtree; #endif struct fib6_info __rcu *leaf; __u16 fn_bit; /* bit key */ __u16 fn_flags; int fn_sernum; struct fib6_info __rcu *rr_ptr; struct rcu_head rcu; }; struct fib6_gc_args { int timeout; int more; }; #ifndef CONFIG_IPV6_SUBTREES #define FIB6_SUBTREE(fn) NULL static inline bool fib6_routes_require_src(const struct net *net) { return false; } static inline void fib6_routes_require_src_inc(struct net *net) {} static inline void fib6_routes_require_src_dec(struct net *net) {} #else static inline bool fib6_routes_require_src(const struct net *net) { return net->ipv6.fib6_routes_require_src > 0; } static inline void fib6_routes_require_src_inc(struct net *net) { net->ipv6.fib6_routes_require_src++; } static inline void fib6_routes_require_src_dec(struct net *net) { net->ipv6.fib6_routes_require_src--; } #define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1)) #endif /* * routing information * */ struct rt6key { struct in6_addr addr; int plen; }; struct fib6_table; struct rt6_exception_bucket { struct hlist_head chain; int depth; }; struct rt6_exception { struct hlist_node hlist; struct rt6_info *rt6i; unsigned long stamp; struct rcu_head rcu; }; #define FIB6_EXCEPTION_BUCKET_SIZE_SHIFT 10 #define FIB6_EXCEPTION_BUCKET_SIZE (1 << FIB6_EXCEPTION_BUCKET_SIZE_SHIFT) #define FIB6_MAX_DEPTH 5 struct fib6_nh { struct fib_nh_common nh_common; #ifdef CONFIG_IPV6_ROUTER_PREF unsigned long last_probe; #endif struct rt6_info * __percpu *rt6i_pcpu; struct rt6_exception_bucket __rcu *rt6i_exception_bucket; }; struct fib6_info { struct fib6_table *fib6_table; struct fib6_info __rcu *fib6_next; struct fib6_node __rcu *fib6_node; /* Multipath routes: * siblings is a list of fib6_info that have the same metric/weight, * destination, but not the same gateway. nsiblings is just a cache * to speed up lookup. 
*/ union { struct list_head fib6_siblings; struct list_head nh_list; }; unsigned int fib6_nsiblings; refcount_t fib6_ref; unsigned long expires; struct hlist_node gc_link; struct dst_metrics *fib6_metrics; #define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1] struct rt6key fib6_dst; u32 fib6_flags; struct rt6key fib6_src; struct rt6key fib6_prefsrc; u32 fib6_metric; u8 fib6_protocol; u8 fib6_type; u8 offload; u8 trap; u8 offload_failed; u8 should_flush:1, dst_nocount:1, dst_nopolicy:1, fib6_destroying:1, unused:4; struct rcu_head rcu; struct nexthop *nh; struct fib6_nh fib6_nh[]; }; struct rt6_info { struct dst_entry dst; struct fib6_info __rcu *from; int sernum; struct rt6key rt6i_dst; struct rt6key rt6i_src; struct in6_addr rt6i_gateway; struct inet6_dev *rt6i_idev; u32 rt6i_flags; /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; }; struct fib6_result { struct fib6_nh *nh; struct fib6_info *f6i; u32 fib6_flags; u8 fib6_type; struct rt6_info *rt6; }; #define for_each_fib6_node_rt_rcu(fn) \ for (rt = rcu_dereference((fn)->leaf); rt; \ rt = rcu_dereference(rt->fib6_next)) #define for_each_fib6_walker_rt(w) \ for (rt = (w)->leaf; rt; \ rt = rcu_dereference_protected(rt->fib6_next, 1)) #define dst_rt6_info(_ptr) container_of_const(_ptr, struct rt6_info, dst) static inline struct inet6_dev *ip6_dst_idev(const struct dst_entry *dst) { return dst_rt6_info(dst)->rt6i_idev; } static inline bool fib6_requires_src(const struct fib6_info *rt) { return rt->fib6_src.plen > 0; } /* The callers should hold f6i->fib6_table->tb6_lock if a route has ever * been added to a table before. */ static inline void fib6_clean_expires(struct fib6_info *f6i) { f6i->fib6_flags &= ~RTF_EXPIRES; f6i->expires = 0; } /* The callers should hold f6i->fib6_table->tb6_lock if a route has ever * been added to a table before. */ static inline void fib6_set_expires(struct fib6_info *f6i, unsigned long expires) { f6i->expires = expires; f6i->fib6_flags |= RTF_EXPIRES; } static inline bool fib6_check_expired(const struct fib6_info *f6i) { if (f6i->fib6_flags & RTF_EXPIRES) return time_after(jiffies, f6i->expires); return false; } /* Function to safely get fn->fn_sernum for passed in rt * and store result in passed in cookie. * Return true if we can get cookie safely * Return false if not */ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i, u32 *cookie) { struct fib6_node *fn; bool status = false; fn = rcu_dereference(f6i->fib6_node); if (fn) { *cookie = READ_ONCE(fn->fn_sernum); /* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */ smp_rmb(); status = true; } return status; } static inline u32 rt6_get_cookie(const struct rt6_info *rt) { struct fib6_info *from; u32 cookie = 0; if (rt->sernum) return rt->sernum; rcu_read_lock(); from = rcu_dereference(rt->from); if (from) fib6_get_cookie_safe(from, &cookie); rcu_read_unlock(); return cookie; } static inline void ip6_rt_put(struct rt6_info *rt) { /* dst_release() accepts a NULL parameter. 
* We rely on dst being first structure in struct rt6_info */ BUILD_BUG_ON(offsetof(struct rt6_info, dst) != 0); dst_release(&rt->dst); } struct fib6_info *fib6_info_alloc(gfp_t gfp_flags, bool with_fib6_nh); void fib6_info_destroy_rcu(struct rcu_head *head); static inline void fib6_info_hold(struct fib6_info *f6i) { refcount_inc(&f6i->fib6_ref); } static inline bool fib6_info_hold_safe(struct fib6_info *f6i) { return refcount_inc_not_zero(&f6i->fib6_ref); } static inline void fib6_info_release(struct fib6_info *f6i) { if (f6i && refcount_dec_and_test(&f6i->fib6_ref)) { DEBUG_NET_WARN_ON_ONCE(!hlist_unhashed(&f6i->gc_link)); call_rcu_hurry(&f6i->rcu, fib6_info_destroy_rcu); } } enum fib6_walk_state { #ifdef CONFIG_IPV6_SUBTREES FWS_S, #endif FWS_L, FWS_R, FWS_C, FWS_U }; struct fib6_walker { struct list_head lh; struct fib6_node *root, *node; struct fib6_info *leaf; enum fib6_walk_state state; unsigned int skip; unsigned int count; unsigned int skip_in_node; int (*func)(struct fib6_walker *); void *args; }; struct rt6_statistics { __u32 fib_nodes; /* all fib6 nodes */ __u32 fib_route_nodes; /* intermediate nodes */ __u32 fib_rt_entries; /* rt entries in fib table */ __u32 fib_rt_cache; /* cached rt entries in exception table */ __u32 fib_discarded_routes; /* total number of routes delete */ /* The following stat is not protected by any lock */ atomic_t fib_rt_alloc; /* total number of routes alloced */ }; #define RTN_TL_ROOT 0x0001 #define RTN_ROOT 0x0002 /* tree root node */ #define RTN_RTINFO 0x0004 /* node with valid routing info */ /* * priority levels (or metrics) * */ struct fib6_table { struct hlist_node tb6_hlist; u32 tb6_id; spinlock_t tb6_lock; struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; unsigned int fib_seq; /* writes protected by rtnl_mutex */ struct hlist_head tb6_gc_hlist; /* GC candidates */ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; #define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC #define RT6_TABLE_MAIN RT_TABLE_MAIN #define RT6_TABLE_DFLT RT6_TABLE_MAIN #define RT6_TABLE_INFO RT6_TABLE_MAIN #define RT6_TABLE_PREFIX RT6_TABLE_MAIN #ifdef CONFIG_IPV6_MULTIPLE_TABLES #define FIB6_TABLE_MIN 1 #define FIB6_TABLE_MAX RT_TABLE_MAX #define RT6_TABLE_LOCAL RT_TABLE_LOCAL #else #define FIB6_TABLE_MIN RT_TABLE_MAIN #define FIB6_TABLE_MAX FIB6_TABLE_MIN #define RT6_TABLE_LOCAL RT6_TABLE_MAIN #endif typedef struct rt6_info *(*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int); struct fib6_entry_notifier_info { struct fib_notifier_info info; /* must be first */ struct fib6_info *rt; unsigned int nsiblings; }; /* * exported functions */ struct fib6_table *fib6_get_table(struct net *net, u32 id); struct fib6_table *fib6_new_table(struct net *net, u32 id); struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, const struct sk_buff *skb, int flags, pol_lookup_t lookup); /* called with rcu lock held; can return error pointer * caller needs to select path */ int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6, struct fib6_result *res, int flags); /* called with rcu lock held; caller needs to select path */ int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, struct fib6_result *res, int strict); void fib6_select_path(const struct net *net, struct fib6_result *res, struct flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict); struct fib6_node *fib6_node_lookup(struct fib6_node *root, const struct in6_addr *daddr, const struct 
in6_addr *saddr); struct fib6_node *fib6_locate(struct fib6_node *root, const struct in6_addr *daddr, int dst_len, const struct in6_addr *saddr, int src_len, bool exact_match); void fib6_clean_all(struct net *net, int (*func)(struct fib6_info *, void *arg), void *arg); void fib6_clean_all_skip_notify(struct net *net, int (*func)(struct fib6_info *, void *arg), void *arg); int fib6_add(struct fib6_node *root, struct fib6_info *rt, struct nl_info *info, struct netlink_ext_ack *extack); int fib6_del(struct fib6_info *rt, struct nl_info *info); static inline void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr) { const struct fib6_info *from; rcu_read_lock(); from = rcu_dereference(rt->from); if (from) *addr = from->fib6_prefsrc.addr; else *addr = in6addr_any; rcu_read_unlock(); } int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); void fib6_nh_release(struct fib6_nh *fib6_nh); void fib6_nh_release_dsts(struct fib6_nh *fib6_nh); int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, struct fib6_info *rt, struct netlink_ext_ack *extack); int call_fib6_multipath_entry_notifiers(struct net *net, enum fib_event_type event_type, struct fib6_info *rt, unsigned int nsiblings, struct netlink_ext_ack *extack); int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt); void fib6_rt_update(struct net *net, struct fib6_info *rt, struct nl_info *info); void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, unsigned int flags); void fib6_run_gc(unsigned long expires, struct net *net, bool force); void fib6_gc_cleanup(void); int fib6_init(void); /* Add the route to the gc list if it is not already there * * The callers should hold f6i->fib6_table->tb6_lock. */ static inline void fib6_add_gc_list(struct fib6_info *f6i) { /* If fib6_node is null, the f6i is not in (or removed from) the * table. * * There is a gap between finding the f6i from the table and * calling this function without the protection of the tb6_lock. * This check makes sure the f6i is not added to the gc list when * it is not on the table. */ if (!rcu_dereference_protected(f6i->fib6_node, lockdep_is_held(&f6i->fib6_table->tb6_lock))) return; if (hlist_unhashed(&f6i->gc_link)) hlist_add_head(&f6i->gc_link, &f6i->fib6_table->tb6_gc_hlist); } /* Remove the route from the gc list if it is on the list. * * The callers should hold f6i->fib6_table->tb6_lock. 
*/ static inline void fib6_remove_gc_list(struct fib6_info *f6i) { if (!hlist_unhashed(&f6i->gc_link)) hlist_del_init(&f6i->gc_link); } struct ipv6_route_iter { struct seq_net_private p; struct fib6_walker w; loff_t skip; struct fib6_table *tbl; int sernum; }; extern const struct seq_operations ipv6_route_seq_ops; int call_fib6_notifier(struct notifier_block *nb, enum fib_event_type event_type, struct fib_notifier_info *info); int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, struct fib_notifier_info *info); int __net_init fib6_notifier_init(struct net *net); void __net_exit fib6_notifier_exit(struct net *net); unsigned int fib6_tables_seq_read(const struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); void fib6_update_sernum(struct net *net, struct fib6_info *rt); void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt); void fib6_update_sernum_stub(struct net *net, struct fib6_info *f6i); void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val); static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) { return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric)); } void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i, bool offload, bool trap, bool offload_failed); #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) struct bpf_iter__ipv6_route { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct fib6_info *, rt); }; #endif INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, int flags)); INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, int flags)); INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, int flags)); INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, int flags)); static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup, struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, int flags) { return INDIRECT_CALL_4(lookup, ip6_pol_route_output, ip6_pol_route_input, ip6_pol_route_lookup, __ip6_route_redirect, net, table, fl6, skb, flags); } #ifdef CONFIG_IPV6_MULTIPLE_TABLES static inline bool fib6_has_custom_rules(const struct net *net) { return net->ipv6.fib6_has_custom_rules; } int fib6_rules_init(void); void fib6_rules_cleanup(void); bool fib6_rule_default(const struct fib_rule *rule); int fib6_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); unsigned int fib6_rules_seq_read(const struct net *net); static inline bool fib6_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, struct flowi6 *fl6, struct flow_keys *flkeys) { unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP; if (!net->ipv6.fib6_rules_require_fldissect) return false; memset(flkeys, 0, sizeof(*flkeys)); __skb_flow_dissect(net, skb, &flow_keys_dissector, flkeys, NULL, 0, 0, 0, flag); fl6->fl6_sport = flkeys->ports.src; fl6->fl6_dport = flkeys->ports.dst; fl6->flowi6_proto = flkeys->basic.ip_proto; return true; } #else static inline bool fib6_has_custom_rules(const struct net *net) { return false; } static inline int fib6_rules_init(void) { return 0; } static 
inline void fib6_rules_cleanup(void) { return ; } static inline bool fib6_rule_default(const struct fib_rule *rule) { return true; } static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack) { return 0; } static inline unsigned int fib6_rules_seq_read(const struct net *net) { return 0; } static inline bool fib6_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, struct flowi6 *fl6, struct flow_keys *flkeys) { return false; } #endif #endif
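fib6_metric_locked() above tests a single bit in the route's RTAX_LOCK metric word: bit N set means metric N was configured as locked and must not be auto-adjusted. A stand-alone sketch of that bit test follows; METRIC_MTU is a stand-in index for illustration, not the real RTAX_MTU value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define METRIC_MTU 2	/* illustrative metric index, not the real RTAX_MTU */

/* Same test as fib6_metric_locked(): metric N is locked when bit N is set. */
static bool metric_locked(uint32_t lock_word, int metric)
{
	return !!(lock_word & (1u << metric));
}

int main(void)
{
	uint32_t lock_word = 1u << METRIC_MTU;	/* pretend only the MTU metric is locked */

	printf("mtu locked: %d\n", metric_locked(lock_word, METRIC_MTU));	/* 1 */
	printf("metric 5 locked: %d\n", metric_locked(lock_word, 5));		/* 0 */
	return 0;
}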
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "super.h"

static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}

int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);

#define bch2_bkey_ops_backpointer ((struct bkey_ops) {	\
	.key_validate	= bch2_backpointer_validate,	\
	.val_to_text	= bch2_backpointer_k_to_text,	\
	.swab		= bch2_backpointer_swab,	\
	.min_val_size	= 32,				\
})

#define MAX_EXTENT_COMPRESS_RATIO_SHIFT	10

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
{
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
	if (ca)
		*bucket = bp_pos_to_bucket(ca, bp_pos);
	rcu_read_unlock();
	return ca != NULL;
}

static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
{
	return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
					c, "backpointer for missing device %llu", bp_pos.inode);
}

static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
						   struct bpos bucket,
						   u64 bucket_offset)
{
	return POS(bucket.inode,
		   (bucket_to_sector(ca, bucket.offset) <<
		    MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);

	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
	return ret;
}

int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
				struct bpos bucket, struct bch_backpointer,
				struct bkey_s_c, bool);

static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bpos bucket,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k,
				bool insert)
{
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);

	struct bkey_i_backpointer bp_k;
bkey_backpointer_init(&bp_k.k_i); bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); bp_k.v = bp; if (!insert) { bp_k.k.type = KEY_TYPE_deleted; set_bkey_val_u64s(&bp_k.k, 0); } return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i); } static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry) { switch (k.k->type) { case KEY_TYPE_btree_ptr: case KEY_TYPE_btree_ptr_v2: return BCH_DATA_btree; case KEY_TYPE_extent: case KEY_TYPE_reflink_v: return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user; case KEY_TYPE_stripe: { const struct bch_extent_ptr *ptr = &entry->ptr; struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k); BUG_ON(ptr < s.v->ptrs || ptr >= s.v->ptrs + s.v->nr_blocks); return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant ? BCH_DATA_parity : BCH_DATA_user; } default: BUG(); } } static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, struct bpos *bucket_pos, struct bch_backpointer *bp, u64 sectors) { u32 bucket_offset; *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset); *bp = (struct bch_backpointer) { .btree_id = btree_id, .level = level, .data_type = bch2_bkey_ptr_data_type(k, p, entry), .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset, .bucket_len = sectors, .pos = k.k->p, }; } static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, struct bpos *bucket_pos, struct bch_backpointer *bp) { u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p); __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors); } int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int, struct bpos *, struct bch_backpointer *, unsigned); struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *, struct bpos, struct bch_backpointer, unsigned); struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *, struct bpos, struct bch_backpointer); int bch2_check_btree_backpointers(struct bch_fs *); int bch2_check_extents_to_backpointers(struct bch_fs *); int bch2_check_backpointers_to_extents(struct bch_fs *); #endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
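swab40() near the top of this header byte-swaps a 40-bit quantity by masking out its five bytes and moving each to the mirrored position, so applying it twice must return the original value. A user-space copy with a quick round-trip check follows; the test value is arbitrary.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* Same byte shuffle as the kernel swab40(): five bytes, mirrored in place. */
static uint64_t swab40(uint64_t x)
{
	return ((x & 0x00000000ffULL) << 32) |
	       ((x & 0x000000ff00ULL) << 16) |
	       ((x & 0x0000ff0000ULL) >>  0) |
	       ((x & 0x00ff000000ULL) >> 16) |
	       ((x & 0xff00000000ULL) >> 32);
}

int main(void)
{
	uint64_t v = 0x1122334455ULL;	/* arbitrary 40-bit test value */

	printf("%010" PRIx64 " -> %010" PRIx64 "\n", v, swab40(v));	/* 5544332211 */
	assert(swab40(swab40(v)) == v);	/* swapping twice is the identity */
	return 0;
}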
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/unicode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handler routines for unicode strings
 */

#include <linux/types.h>
#include <linux/nls.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Fold the case of a unicode char, given the 16 bit value */
/* Returns folded char, or 0 if ignorable */
static inline u16 case_fold(u16 c)
{
	u16 tmp;

	tmp = hfsplus_case_fold_table[c >> 8];
	if (tmp)
		tmp = hfsplus_case_fold_table[tmp + (c & 0xff)];
	else
		tmp = c;
	return tmp;
}

/* Compare unicode strings, return values like normal strcmp */
int hfsplus_strcasecmp(const struct hfsplus_unistr *s1,
		       const struct hfsplus_unistr *s2)
{
	u16 len1, len2, c1, c2;
	const hfsplus_unichr *p1, *p2;

	len1 = be16_to_cpu(s1->length);
	len2 = be16_to_cpu(s2->length);
	p1 = s1->unicode;
	p2 = s2->unicode;

	while (1) {
		c1 = c2 = 0;

		while (len1 && !c1) {
			c1 = case_fold(be16_to_cpu(*p1));
			p1++;
			len1--;
		}
		while (len2 && !c2) {
			c2 = case_fold(be16_to_cpu(*p2));
			p2++;
			len2--;
		}

		if (c1 != c2)
			return (c1 < c2) ?
-1 : 1; if (!c1 && !c2) return 0; } } /* Compare names as a sequence of 16-bit unsigned integers */ int hfsplus_strcmp(const struct hfsplus_unistr *s1, const struct hfsplus_unistr *s2) { u16 len1, len2, c1, c2; const hfsplus_unichr *p1, *p2; int len; len1 = be16_to_cpu(s1->length); len2 = be16_to_cpu(s2->length); p1 = s1->unicode; p2 = s2->unicode; for (len = min(len1, len2); len > 0; len--) { c1 = be16_to_cpu(*p1); c2 = be16_to_cpu(*p2); if (c1 != c2) return c1 < c2 ? -1 : 1; p1++; p2++; } return len1 < len2 ? -1 : len1 > len2 ? 1 : 0; } #define Hangul_SBase 0xac00 #define Hangul_LBase 0x1100 #define Hangul_VBase 0x1161 #define Hangul_TBase 0x11a7 #define Hangul_SCount 11172 #define Hangul_LCount 19 #define Hangul_VCount 21 #define Hangul_TCount 28 #define Hangul_NCount (Hangul_VCount * Hangul_TCount) static u16 *hfsplus_compose_lookup(u16 *p, u16 cc) { int i, s, e; s = 1; e = p[1]; if (!e || cc < p[s * 2] || cc > p[e * 2]) return NULL; do { i = (s + e) / 2; if (cc > p[i * 2]) s = i + 1; else if (cc < p[i * 2]) e = i - 1; else return hfsplus_compose_table + p[i * 2 + 1]; } while (s <= e); return NULL; } int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr, char *astr, int *len_p) { const hfsplus_unichr *ip; struct nls_table *nls = HFSPLUS_SB(sb)->nls; u8 *op; u16 cc, c0, c1; u16 *ce1, *ce2; int i, len, ustrlen, res, compose; op = astr; ip = ustr->unicode; ustrlen = be16_to_cpu(ustr->length); len = *len_p; ce1 = NULL; compose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); while (ustrlen > 0) { c0 = be16_to_cpu(*ip++); ustrlen--; /* search for single decomposed char */ if (likely(compose)) ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0); if (ce1) cc = ce1[0]; else cc = 0; if (cc) { /* start of a possibly decomposed Hangul char */ if (cc != 0xffff) goto done; if (!ustrlen) goto same; c1 = be16_to_cpu(*ip) - Hangul_VBase; if (c1 < Hangul_VCount) { /* compose the Hangul char */ cc = (c0 - Hangul_LBase) * Hangul_VCount; cc = (cc + c1) * Hangul_TCount; cc += Hangul_SBase; ip++; ustrlen--; if (!ustrlen) goto done; c1 = be16_to_cpu(*ip) - Hangul_TBase; if (c1 > 0 && c1 < Hangul_TCount) { cc += c1; ip++; ustrlen--; } goto done; } } while (1) { /* main loop for common case of not composed chars */ if (!ustrlen) goto same; c1 = be16_to_cpu(*ip); if (likely(compose)) ce1 = hfsplus_compose_lookup( hfsplus_compose_table, c1); if (ce1) break; switch (c0) { case 0: c0 = 0x2400; break; case '/': c0 = ':'; break; } res = nls->uni2char(c0, op, len); if (res < 0) { if (res == -ENAMETOOLONG) goto out; *op = '?'; res = 1; } op += res; len -= res; c0 = c1; ip++; ustrlen--; } ce2 = hfsplus_compose_lookup(ce1, c0); if (ce2) { i = 1; while (i < ustrlen) { ce1 = hfsplus_compose_lookup(ce2, be16_to_cpu(ip[i])); if (!ce1) break; i++; ce2 = ce1; } cc = ce2[0]; if (cc) { ip += i; ustrlen -= i; goto done; } } same: switch (c0) { case 0: cc = 0x2400; break; case '/': cc = ':'; break; default: cc = c0; } done: res = nls->uni2char(cc, op, len); if (res < 0) { if (res == -ENAMETOOLONG) goto out; *op = '?'; res = 1; } op += res; len -= res; } res = 0; out: *len_p = (char *)op - astr; return res; } /* * Convert one or more ASCII characters into a single unicode character. * Returns the number of ASCII characters corresponding to the unicode char. 
*/ static inline int asc2unichar(struct super_block *sb, const char *astr, int len, wchar_t *uc) { int size = HFSPLUS_SB(sb)->nls->char2uni(astr, len, uc); if (size <= 0) { *uc = '?'; size = 1; } switch (*uc) { case 0x2400: *uc = 0; break; case ':': *uc = '/'; break; } return size; } /* Decomposes a non-Hangul unicode character. */ static u16 *hfsplus_decompose_nonhangul(wchar_t uc, int *size) { int off; off = hfsplus_decompose_table[(uc >> 12) & 0xf]; if (off == 0 || off == 0xffff) return NULL; off = hfsplus_decompose_table[off + ((uc >> 8) & 0xf)]; if (!off) return NULL; off = hfsplus_decompose_table[off + ((uc >> 4) & 0xf)]; if (!off) return NULL; off = hfsplus_decompose_table[off + (uc & 0xf)]; *size = off & 3; if (*size == 0) return NULL; return hfsplus_decompose_table + (off / 4); } /* * Try to decompose a unicode character as Hangul. Return 0 if @uc is not * precomposed Hangul, otherwise return the length of the decomposition. * * This function was adapted from sample code from the Unicode Standard * Annex #15: Unicode Normalization Forms, version 3.2.0. * * Copyright (C) 1991-2018 Unicode, Inc. All rights reserved. Distributed * under the Terms of Use in http://www.unicode.org/copyright.html. */ static int hfsplus_try_decompose_hangul(wchar_t uc, u16 *result) { int index; int l, v, t; index = uc - Hangul_SBase; if (index < 0 || index >= Hangul_SCount) return 0; l = Hangul_LBase + index / Hangul_NCount; v = Hangul_VBase + (index % Hangul_NCount) / Hangul_TCount; t = Hangul_TBase + index % Hangul_TCount; result[0] = l; result[1] = v; if (t != Hangul_TBase) { result[2] = t; return 3; } return 2; } /* Decomposes a single unicode character. */ static u16 *decompose_unichar(wchar_t uc, int *size, u16 *hangul_buffer) { u16 *result; /* Hangul is handled separately */ result = hangul_buffer; *size = hfsplus_try_decompose_hangul(uc, result); if (*size == 0) result = hfsplus_decompose_nonhangul(uc, size); return result; } int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr, int max_unistr_len, const char *astr, int len) { int size, dsize, decompose; u16 *dstr, outlen = 0; wchar_t c; u16 dhangul[3]; decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); while (outlen < max_unistr_len && len > 0) { size = asc2unichar(sb, astr, len, &c); if (decompose) dstr = decompose_unichar(c, &dsize, dhangul); else dstr = NULL; if (dstr) { if (outlen + dsize > max_unistr_len) break; do { ustr->unicode[outlen++] = cpu_to_be16(*dstr++); } while (--dsize > 0); } else ustr->unicode[outlen++] = cpu_to_be16(c); astr += size; len -= size; } ustr->length = cpu_to_be16(outlen); if (len > 0) return -ENAMETOOLONG; return 0; } /* * Hash a string to an integer as appropriate for the HFS+ filesystem. * Composed unicode characters are decomposed and case-folding is performed * if the appropriate bits are (un)set on the superblock. 
*/ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str) { struct super_block *sb = dentry->d_sb; const char *astr; const u16 *dstr; int casefold, decompose, size, len; unsigned long hash; wchar_t c; u16 c2; u16 dhangul[3]; casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); hash = init_name_hash(dentry); astr = str->name; len = str->len; while (len > 0) { int dsize; size = asc2unichar(sb, astr, len, &c); astr += size; len -= size; if (decompose) dstr = decompose_unichar(c, &dsize, dhangul); else dstr = NULL; if (dstr) { do { c2 = *dstr++; if (casefold) c2 = case_fold(c2); if (!casefold || c2) hash = partial_name_hash(c2, hash); } while (--dsize > 0); } else { c2 = c; if (casefold) c2 = case_fold(c2); if (!casefold || c2) hash = partial_name_hash(c2, hash); } } str->hash = end_name_hash(hash); return 0; } /* * Compare strings with HFS+ filename ordering. * Composed unicode characters are decomposed and case-folding is performed * if the appropriate bits are (un)set on the superblock. */ int hfsplus_compare_dentry(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct super_block *sb = dentry->d_sb; int casefold, decompose, size; int dsize1, dsize2, len1, len2; const u16 *dstr1, *dstr2; const char *astr1, *astr2; u16 c1, c2; wchar_t c; u16 dhangul_1[3], dhangul_2[3]; casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags); decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags); astr1 = str; len1 = len; astr2 = name->name; len2 = name->len; dsize1 = dsize2 = 0; dstr1 = dstr2 = NULL; while (len1 > 0 && len2 > 0) { if (!dsize1) { size = asc2unichar(sb, astr1, len1, &c); astr1 += size; len1 -= size; if (decompose) dstr1 = decompose_unichar(c, &dsize1, dhangul_1); if (!decompose || !dstr1) { c1 = c; dstr1 = &c1; dsize1 = 1; } } if (!dsize2) { size = asc2unichar(sb, astr2, len2, &c); astr2 += size; len2 -= size; if (decompose) dstr2 = decompose_unichar(c, &dsize2, dhangul_2); if (!decompose || !dstr2) { c2 = c; dstr2 = &c2; dsize2 = 1; } } c1 = *dstr1; c2 = *dstr2; if (casefold) { c1 = case_fold(c1); if (!c1) { dstr1++; dsize1--; continue; } c2 = case_fold(c2); if (!c2) { dstr2++; dsize2--; continue; } } if (c1 < c2) return -1; else if (c1 > c2) return 1; dstr1++; dsize1--; dstr2++; dsize2--; } if (len1 < len2) return -1; if (len1 > len2) return 1; return 0; }
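hfsplus_try_decompose_hangul() above uses the arithmetic from Unicode Standard Annex #15: a precomposed syllable is an index into an L x V x T grid, so the leading consonant, vowel and optional trailing consonant fall out of one division, one modulo-then-division, and one modulo. The same arithmetic in a stand-alone sketch, decomposing U+D55C as an example (constants copied from the file above; the helper name is ours):

#include <stdio.h>

/* Constants as used by hfsplus (Unicode Standard Annex #15). */
#define Hangul_SBase	0xac00
#define Hangul_LBase	0x1100
#define Hangul_VBase	0x1161
#define Hangul_TBase	0x11a7
#define Hangul_SCount	11172
#define Hangul_VCount	21
#define Hangul_TCount	28
#define Hangul_NCount	(Hangul_VCount * Hangul_TCount)

/* Returns 0 if @uc is not precomposed Hangul, else the number of jamo (2 or 3). */
static int decompose_hangul(unsigned int uc, unsigned short *result)
{
	int index = uc - Hangul_SBase;

	if (index < 0 || index >= Hangul_SCount)
		return 0;
	result[0] = Hangul_LBase + index / Hangul_NCount;
	result[1] = Hangul_VBase + (index % Hangul_NCount) / Hangul_TCount;
	if (index % Hangul_TCount) {
		result[2] = Hangul_TBase + index % Hangul_TCount;
		return 3;
	}
	return 2;
}

int main(void)
{
	unsigned short jamo[3];
	int i, n = decompose_hangul(0xd55c, jamo);	/* U+D55C decomposes to 3 jamo */

	for (i = 0; i < n; i++)
		printf("U+%04X\n", (unsigned int)jamo[i]);	/* U+1112 U+1161 U+11AB */
	return 0;
}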
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/sch_cbs.c Credit Based Shaper * * Authors: Vinicius Costa Gomes <vinicius.gomes@intel.com> */ /* Credit Based Shaper (CBS) * ========================= * * This is a simple rate-limiting shaper aimed at TSN applications on * systems with known traffic workloads. * * Its algorithm is defined by the IEEE 802.1Q-2014 Specification, * Section 8.6.8.2, and explained in more detail in the Annex L of the * same specification. * * There are four tunables to be considered: * * 'idleslope': Idleslope is the rate of credits that is * accumulated (in kilobits per second) when there is at least * one packet waiting for transmission. Packets are transmitted * when the current value of credits is equal to or greater than * zero. When there is no packet to be transmitted the amount of * credits is set to zero. This is the main tunable of the CBS * algorithm. * * 'sendslope': * Sendslope is the rate of credits that is depleted (it should be a * negative number of kilobits per second) when a transmission is * occurring.
It can be calculated as follows, (IEEE 802.1Q-2014 Section * 8.6.8.2 item g): * * sendslope = idleslope - port_transmit_rate * * 'hicredit': Hicredit defines the maximum amount of credits (in * bytes) that can be accumulated. Hicredit depends on the * characteristics of interfering traffic, * 'max_interference_size' is the maximum size of any burst of * traffic that can delay the transmission of a frame that is * available for transmission for this traffic class, (IEEE * 802.1Q-2014 Annex L, Equation L-3): * * hicredit = max_interference_size * (idleslope / port_transmit_rate) * * 'locredit': Locredit is the minimum amount of credits that can * be reached. It is a function of the traffic flowing through * this qdisc (IEEE 802.1Q-2014 Annex L, Equation L-2): * * locredit = max_frame_size * (sendslope / port_transmit_rate) */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/units.h> #include <net/netevent.h> #include <net/netlink.h> #include <net/sch_generic.h> #include <net/pkt_sched.h> static LIST_HEAD(cbs_list); static DEFINE_SPINLOCK(cbs_list_lock); struct cbs_sched_data { bool offload; int queue; atomic64_t port_rate; /* in bytes/s */ s64 last; /* timestamp in ns */ s64 credits; /* in bytes */ s32 locredit; /* in bytes */ s32 hicredit; /* in bytes */ s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; struct list_head cbs_list; }; static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct Qdisc *child, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; err = child->ops->enqueue(skb, child, to_free); if (err != NET_XMIT_SUCCESS) return err; sch->qstats.backlog += len; sch->q.qlen++; return NET_XMIT_SUCCESS; } static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; return cbs_child_enqueue(skb, sch, qdisc, to_free); } static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; if (sch->q.qlen == 0 && q->credits > 0) { /* We need to stop accumulating credits when there's * no enqueued packets and q->credits is positive. 
*/ q->credits = 0; q->last = ktime_get_ns(); } return cbs_child_enqueue(skb, sch, qdisc, to_free); } static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); return q->enqueue(skb, sch, to_free); } /* timediff is in ns, slope is in bytes/s */ static s64 timediff_to_credits(s64 timediff, s64 slope) { return div64_s64(timediff * slope, NSEC_PER_SEC); } static s64 delay_from_credits(s64 credits, s64 slope) { if (unlikely(slope == 0)) return S64_MAX; return div64_s64(-credits * NSEC_PER_SEC, slope); } static s64 credits_from_len(unsigned int len, s64 slope, s64 port_rate) { if (unlikely(port_rate == 0)) return S64_MAX; return div64_s64(len * slope, port_rate); } static struct sk_buff *cbs_child_dequeue(struct Qdisc *sch, struct Qdisc *child) { struct sk_buff *skb; skb = child->ops->dequeue(child); if (!skb) return NULL; qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; s64 now = ktime_get_ns(); struct sk_buff *skb; s64 credits; int len; /* The previous packet is still being sent */ if (now < q->last) { qdisc_watchdog_schedule_ns(&q->watchdog, q->last); return NULL; } if (q->credits < 0) { credits = timediff_to_credits(now - q->last, q->idleslope); credits = q->credits + credits; q->credits = min_t(s64, credits, q->hicredit); if (q->credits < 0) { s64 delay; delay = delay_from_credits(q->credits, q->idleslope); qdisc_watchdog_schedule_ns(&q->watchdog, now + delay); q->last = now; return NULL; } } skb = cbs_child_dequeue(sch, qdisc); if (!skb) return NULL; len = qdisc_pkt_len(skb); /* As sendslope is a negative number, this will decrease the * amount of q->credits. 
*/ credits = credits_from_len(len, q->sendslope, atomic64_read(&q->port_rate)); credits += q->credits; q->credits = max_t(s64, credits, q->locredit); /* Estimate of the transmission of the last byte of the packet in ns */ if (unlikely(atomic64_read(&q->port_rate) == 0)) q->last = now; else q->last = now + div64_s64(len * NSEC_PER_SEC, atomic64_read(&q->port_rate)); return skb; } static struct sk_buff *cbs_dequeue_offload(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; return cbs_child_dequeue(sch, qdisc); } static struct sk_buff *cbs_dequeue(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); return q->dequeue(sch); } static const struct nla_policy cbs_policy[TCA_CBS_MAX + 1] = { [TCA_CBS_PARMS] = { .len = sizeof(struct tc_cbs_qopt) }, }; static void cbs_disable_offload(struct net_device *dev, struct cbs_sched_data *q) { struct tc_cbs_qopt_offload cbs = { }; const struct net_device_ops *ops; int err; if (!q->offload) return; q->enqueue = cbs_enqueue_soft; q->dequeue = cbs_dequeue_soft; ops = dev->netdev_ops; if (!ops->ndo_setup_tc) return; cbs.queue = q->queue; cbs.enable = 0; err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) pr_warn("Couldn't disable CBS offload for queue %d\n", cbs.queue); } static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, const struct tc_cbs_qopt *opt, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; struct tc_cbs_qopt_offload cbs = { }; int err; if (!ops->ndo_setup_tc) { NL_SET_ERR_MSG(extack, "Specified device does not support cbs offload"); return -EOPNOTSUPP; } cbs.queue = q->queue; cbs.enable = 1; cbs.hicredit = opt->hicredit; cbs.locredit = opt->locredit; cbs.idleslope = opt->idleslope; cbs.sendslope = opt->sendslope; err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) { NL_SET_ERR_MSG(extack, "Specified device failed to setup cbs hardware offload"); return err; } q->enqueue = cbs_enqueue_offload; q->dequeue = cbs_dequeue_offload; return 0; } static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; int speed = SPEED_10; s64 port_rate; int err; err = __ethtool_get_link_ksettings(dev, &ecmd); if (err < 0) goto skip; if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN) speed = ecmd.base.speed; skip: port_rate = speed * 1000 * BYTES_PER_KBIT; atomic64_set(&q->port_rate, port_rate); netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", dev->name, (long long)atomic64_read(&q->port_rate), ecmd.base.speed); } static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct cbs_sched_data *q; struct net_device *qdev; bool found = false; ASSERT_RTNL(); if (event != NETDEV_UP && event != NETDEV_CHANGE) return NOTIFY_DONE; spin_lock(&cbs_list_lock); list_for_each_entry(q, &cbs_list, cbs_list) { qdev = qdisc_dev(q->qdisc); if (qdev == dev) { found = true; break; } } spin_unlock(&cbs_list_lock); if (found) cbs_set_port_rate(dev, q); return NOTIFY_DONE; } static int cbs_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct nlattr *tb[TCA_CBS_MAX + 1]; struct tc_cbs_qopt *qopt; int err; err = nla_parse_nested_deprecated(tb, TCA_CBS_MAX, opt, cbs_policy, extack); if (err < 0) return err; if (!tb[TCA_CBS_PARMS]) { NL_SET_ERR_MSG(extack, 
"Missing CBS parameter which are mandatory"); return -EINVAL; } qopt = nla_data(tb[TCA_CBS_PARMS]); if (!qopt->offload) { cbs_set_port_rate(dev, q); cbs_disable_offload(dev, q); } else { err = cbs_enable_offload(dev, q, qopt, extack); if (err < 0) return err; } /* Everything went OK, save the parameters used. */ WRITE_ONCE(q->hicredit, qopt->hicredit); WRITE_ONCE(q->locredit, qopt->locredit); WRITE_ONCE(q->idleslope, qopt->idleslope * BYTES_PER_KBIT); WRITE_ONCE(q->sendslope, qopt->sendslope * BYTES_PER_KBIT); WRITE_ONCE(q->offload, qopt->offload); return 0; } static int cbs_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); if (!opt) { NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory"); return -EINVAL; } q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle, extack); if (!q->qdisc) return -ENOMEM; spin_lock(&cbs_list_lock); list_add(&q->cbs_list, &cbs_list); spin_unlock(&cbs_list_lock); qdisc_hash_add(q->qdisc, false); q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); q->enqueue = cbs_enqueue_soft; q->dequeue = cbs_dequeue_soft; qdisc_watchdog_init(&q->watchdog, sch); return cbs_change(sch, opt, extack); } static void cbs_destroy(struct Qdisc *sch) { struct cbs_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); /* Nothing to do if we couldn't create the underlying qdisc */ if (!q->qdisc) return; qdisc_watchdog_cancel(&q->watchdog); cbs_disable_offload(dev, q); spin_lock(&cbs_list_lock); list_del(&q->cbs_list); spin_unlock(&cbs_list_lock); qdisc_put(q->qdisc); } static int cbs_dump(struct Qdisc *sch, struct sk_buff *skb) { struct cbs_sched_data *q = qdisc_priv(sch); struct tc_cbs_qopt opt = { }; struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (!nest) goto nla_put_failure; opt.hicredit = READ_ONCE(q->hicredit); opt.locredit = READ_ONCE(q->locredit); opt.sendslope = div64_s64(READ_ONCE(q->sendslope), BYTES_PER_KBIT); opt.idleslope = div64_s64(READ_ONCE(q->idleslope), BYTES_PER_KBIT); opt.offload = READ_ONCE(q->offload); if (nla_put(skb, TCA_CBS_PARMS, sizeof(opt), &opt)) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static int cbs_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct cbs_sched_data *q = qdisc_priv(sch); if (cl != 1 || !q->qdisc) /* only one class */ return -ENOENT; tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int cbs_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old, struct netlink_ext_ack *extack) { struct cbs_sched_data *q = qdisc_priv(sch); if (!new) { new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle, NULL); if (!new) new = &noop_qdisc; } *old = qdisc_replace(sch, new, &q->qdisc); return 0; } static struct Qdisc *cbs_leaf(struct Qdisc *sch, unsigned long arg) { struct cbs_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long cbs_find(struct Qdisc *sch, u32 classid) { return 1; } static void cbs_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { tc_qdisc_stats_dump(sch, 1, walker); } } static const struct Qdisc_class_ops cbs_class_ops = { .graft = cbs_graft, .leaf = cbs_leaf, .find = cbs_find, .walk = cbs_walk, .dump = cbs_dump_class, }; static struct Qdisc_ops cbs_qdisc_ops __read_mostly = { .id = "cbs", .cl_ops = 
&cbs_class_ops, .priv_size = sizeof(struct cbs_sched_data), .enqueue = cbs_enqueue, .dequeue = cbs_dequeue, .peek = qdisc_peek_dequeued, .init = cbs_init, .reset = qdisc_reset_queue, .destroy = cbs_destroy, .change = cbs_change, .dump = cbs_dump, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("cbs"); static struct notifier_block cbs_device_notifier = { .notifier_call = cbs_dev_notifier, }; static int __init cbs_module_init(void) { int err; err = register_netdevice_notifier(&cbs_device_notifier); if (err) return err; err = register_qdisc(&cbs_qdisc_ops); if (err) unregister_netdevice_notifier(&cbs_device_notifier); return err; } static void __exit cbs_module_exit(void) { unregister_qdisc(&cbs_qdisc_ops); unregister_netdevice_notifier(&cbs_device_notifier); } module_init(cbs_module_init) module_exit(cbs_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Credit Based shaper");
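The soft dequeue path above turns elapsed time into replenished credit (timediff * idleslope / NSEC_PER_SEC) and a packet length into consumed credit (len * sendslope / port_rate). A small userspace sketch of that arithmetic follows; the 1 Gb/s port and 100 Mb/s idleslope are illustrative assumptions, not values taken from the qdisc.

/*
 * Userspace sketch of the credit arithmetic used by cbs_dequeue_soft()
 * above.  The slope and port-rate numbers are assumed for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC   1000000000LL
#define BYTES_PER_KBIT 125LL	/* 1 kbit/s == 125 bytes/s */

/* credits gained while waiting: timediff[ns] * slope[bytes/s] / 1e9 */
static int64_t timediff_to_credits(int64_t timediff, int64_t slope)
{
	return timediff * slope / NSEC_PER_SEC;
}

/* credits consumed by sending 'len' bytes with 'slope' at 'port_rate' */
static int64_t credits_from_len(unsigned int len, int64_t slope, int64_t port_rate)
{
	return (int64_t)len * slope / port_rate;
}

int main(void)
{
	/* Assumed example configuration: 100 Mb/s reserved on a 1 Gb/s port. */
	int64_t port_rate = 1000 * 1000 * BYTES_PER_KBIT;	/* bytes/s */
	int64_t idleslope = 100 * 1000 * BYTES_PER_KBIT;	/* bytes/s */
	int64_t sendslope = idleslope - port_rate;		/* negative */
	int64_t credits = -1000;				/* start in debt */

	/* 100 us of waiting replenishes credit at idleslope. */
	credits += timediff_to_credits(100000, idleslope);
	printf("after 100us idle: %lld bytes of credit\n", (long long)credits);

	/* Transmitting a 1500-byte frame drains credit at sendslope. */
	credits += credits_from_len(1500, sendslope, port_rate);
	printf("after 1500B frame: %lld bytes of credit\n", (long long)credits);

	/* The qdisc additionally clamps credits to [locredit, hicredit]; omitted here. */
	return 0;
}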
// SPDX-License-Identifier: GPL-2.0-or-later /* * Tap functions for AF_VSOCK sockets. * * Code based on net/netlink/af_netlink.c tap functions. */ #include <linux/module.h> #include <net/sock.h> #include <net/af_vsock.h> #include <linux/if_arp.h> static DEFINE_SPINLOCK(vsock_tap_lock); static struct list_head vsock_tap_all __read_mostly = LIST_HEAD_INIT(vsock_tap_all); int vsock_add_tap(struct vsock_tap *vt) { if (unlikely(vt->dev->type != ARPHRD_VSOCKMON)) return -EINVAL; __module_get(vt->module); spin_lock(&vsock_tap_lock); list_add_rcu(&vt->list, &vsock_tap_all); spin_unlock(&vsock_tap_lock); return 0; } EXPORT_SYMBOL_GPL(vsock_add_tap); int vsock_remove_tap(struct vsock_tap *vt) { struct vsock_tap *tmp; bool found = false; spin_lock(&vsock_tap_lock); list_for_each_entry(tmp, &vsock_tap_all, list) { if (vt == tmp) { list_del_rcu(&vt->list); found = true; goto out; } } pr_warn("vsock_remove_tap: %p not found\n", vt); out: spin_unlock(&vsock_tap_lock); synchronize_net(); if (found) module_put(vt->module); return found ? 0 : -ENODEV; } EXPORT_SYMBOL_GPL(vsock_remove_tap); static int __vsock_deliver_tap_skb(struct sk_buff *skb, struct net_device *dev) { int ret = 0; struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { dev_hold(dev); nskb->dev = dev; ret = dev_queue_xmit(nskb); if (unlikely(ret > 0)) ret = net_xmit_errno(ret); dev_put(dev); } return ret; } static void __vsock_deliver_tap(struct sk_buff *skb) { int ret; struct vsock_tap *tmp; list_for_each_entry_rcu(tmp, &vsock_tap_all, list) { ret = __vsock_deliver_tap_skb(skb, tmp->dev); if (unlikely(ret)) break; } } void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque) { struct sk_buff *skb; rcu_read_lock(); if (likely(list_empty(&vsock_tap_all))) goto out; skb = build_skb(opaque); if (skb) { __vsock_deliver_tap(skb); consume_skb(skb); } out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(vsock_deliver_tap);
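vsock_deliver_tap() deliberately takes a builder callback instead of an skb, so the monitoring skb is only constructed when at least one ARPHRD_VSOCKMON tap is registered. The sketch below shows how a caller might use that interface; my_tap_ctx, my_build_skb() and my_transport_mirror() are made-up names, and a real transport would also fill in the vsockmon header layout, which this sketch omits.

/*
 * Hypothetical caller of vsock_deliver_tap() above.  Everything named
 * my_* is an illustrative assumption, not code from a real transport.
 */
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/af_vsock.h>

struct my_tap_ctx {
	const void *data;
	unsigned int len;
};

/* Only invoked when a tap device is actually registered. */
static struct sk_buff *my_build_skb(void *opaque)
{
	struct my_tap_ctx *ctx = opaque;
	struct sk_buff *skb = alloc_skb(ctx->len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_put_data(skb, ctx->data, ctx->len);
	return skb;
}

/* Mirror a received/transmitted packet to any vsockmon taps. */
void my_transport_mirror(const void *data, unsigned int len)
{
	struct my_tap_ctx ctx = { .data = data, .len = len };

	vsock_deliver_tap(my_build_skb, &ctx);
}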
// SPDX-License-Identifier: GPL-2.0 /* File: fs/ext4/acl.h (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org> */ #include <linux/posix_acl_xattr.h> #define EXT4_ACL_VERSION 0x0001 typedef struct { __le16 e_tag; __le16 e_perm; __le32 e_id; } ext4_acl_entry; typedef struct { __le16 e_tag; __le16 e_perm; } ext4_acl_entry_short; typedef struct { __le32 a_version; } ext4_acl_header; static inline size_t ext4_acl_size(int count) { if (count <= 4) { return sizeof(ext4_acl_header) + count * sizeof(ext4_acl_entry_short); } else { return sizeof(ext4_acl_header) + 4 * sizeof(ext4_acl_entry_short) + (count - 4) * sizeof(ext4_acl_entry); } } static inline int ext4_acl_count(size_t size) { ssize_t s; size -= sizeof(ext4_acl_header); s = size - 4 * sizeof(ext4_acl_entry_short); if (s < 0) { if (size % sizeof(ext4_acl_entry_short)) return -1; return size / sizeof(ext4_acl_entry_short); } else { if (s % sizeof(ext4_acl_entry)) return -1; return s / sizeof(ext4_acl_entry) + 4; } } #ifdef CONFIG_EXT4_FS_POSIX_ACL /* acl.c */ struct posix_acl *ext4_get_acl(struct inode *inode, int type, bool rcu); int ext4_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, struct posix_acl *acl, int type); extern int ext4_init_acl(handle_t *, struct inode *, struct inode *); #else /* CONFIG_EXT4_FS_POSIX_ACL */ #include <linux/sched.h> #define ext4_get_acl NULL #define ext4_set_acl NULL static inline int ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) { return 0; } #endif /* CONFIG_EXT4_FS_POSIX_ACL */
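ext4_acl_size() and ext4_acl_count() encode the on-disk rule that the first four entries are stored in the short form (no e_id) and any further entries in the full form. The userspace check below verifies that the two functions round-trip; plain fixed-width integers stand in for __le16/__le32, which is harmless because only the structure sizes matter here.

/*
 * Userspace check of the ext4 ACL size arithmetic above:
 * count -> size -> count should round-trip.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

typedef struct { uint16_t e_tag; uint16_t e_perm; uint32_t e_id; } acl_entry;
typedef struct { uint16_t e_tag; uint16_t e_perm; } acl_entry_short;
typedef struct { uint32_t a_version; } acl_header;

static size_t acl_size(int count)
{
	if (count <= 4)
		return sizeof(acl_header) + count * sizeof(acl_entry_short);
	return sizeof(acl_header) + 4 * sizeof(acl_entry_short) +
	       (count - 4) * sizeof(acl_entry);
}

static int acl_count(size_t size)
{
	ssize_t s;

	size -= sizeof(acl_header);
	s = size - 4 * sizeof(acl_entry_short);
	if (s < 0) {
		if (size % sizeof(acl_entry_short))
			return -1;
		return size / sizeof(acl_entry_short);
	}
	if (s % sizeof(acl_entry))
		return -1;
	return s / sizeof(acl_entry) + 4;
}

int main(void)
{
	for (int count = 0; count <= 32; count++)
		assert(acl_count(acl_size(count)) == count);
	printf("size(6) = %zu bytes\n", acl_size(6));	/* 4 short + 2 full entries */
	return 0;
}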
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra * * Provides a framework for enqueueing and running callbacks from hardirq * context. The enqueueing is NMI-safe. */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/irq_work.h> #include <linux/percpu.h> #include <linux/hardirq.h> #include <linux/irqflags.h> #include <linux/sched.h> #include <linux/tick.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/smp.h> #include <linux/smpboot.h> #include <asm/processor.h> #include <linux/kasan.h> #include <trace/events/ipi.h> static DEFINE_PER_CPU(struct llist_head, raised_list); static DEFINE_PER_CPU(struct llist_head, lazy_list); static DEFINE_PER_CPU(struct task_struct *, irq_workd); static void wake_irq_workd(void) { struct task_struct *tsk = __this_cpu_read(irq_workd); if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk) wake_up_process(tsk); } #ifdef CONFIG_SMP static void irq_work_wake(struct irq_work *entry) { wake_irq_workd(); } static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) = IRQ_WORK_INIT_HARD(irq_work_wake); #endif static int irq_workd_should_run(unsigned int cpu) { return !llist_empty(this_cpu_ptr(&lazy_list)); } /* * Claim the entry so that no one else will poke at it. */ static bool irq_work_claim(struct irq_work *work) { int oflags; oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags); /* * If the work is already pending, no need to raise the IPI. * The pairing smp_mb() in irq_work_single() makes sure * everything we did before is visible.
*/ if (oflags & IRQ_WORK_PENDING) return false; return true; } void __weak arch_irq_work_raise(void) { /* * Lame architectures will get the timer tick callback */ } static __always_inline void irq_work_raise(struct irq_work *work) { if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt()) trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func); arch_irq_work_raise(); } /* Enqueue on current CPU, work must already be claimed and preempt disabled */ static void __irq_work_queue_local(struct irq_work *work) { struct llist_head *list; bool rt_lazy_work = false; bool lazy_work = false; int work_flags; work_flags = atomic_read(&work->node.a_flags); if (work_flags & IRQ_WORK_LAZY) lazy_work = true; else if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(work_flags & IRQ_WORK_HARD_IRQ)) rt_lazy_work = true; if (lazy_work || rt_lazy_work) list = this_cpu_ptr(&lazy_list); else list = this_cpu_ptr(&raised_list); if (!llist_add(&work->node.llist, list)) return; /* If the work is "lazy", handle it from next tick if any */ if (!lazy_work || tick_nohz_tick_stopped()) irq_work_raise(work); } /* Enqueue the irq work @work on the current CPU */ bool irq_work_queue(struct irq_work *work) { /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; /* Queue the entry and raise the IPI if needed. */ preempt_disable(); __irq_work_queue_local(work); preempt_enable(); return true; } EXPORT_SYMBOL_GPL(irq_work_queue); /* * Enqueue the irq_work @work on @cpu unless it's already pending * somewhere. * * Can be re-enqueued while the callback is still in progress. */ bool irq_work_queue_on(struct irq_work *work, int cpu) { #ifndef CONFIG_SMP return irq_work_queue(work); #else /* CONFIG_SMP: */ /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; kasan_record_aux_stack_noalloc(work); preempt_disable(); if (cpu != smp_processor_id()) { /* Arch remote IPI send/receive backend aren't NMI safe */ WARN_ON_ONCE(in_nmi()); /* * On PREEMPT_RT the items which are not marked as * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work * item is used on the remote CPU to wake the thread. */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu))) goto out; work = &per_cpu(irq_work_wakeup, cpu); if (!irq_work_claim(work)) goto out; } __smp_call_single_queue(cpu, &work->node.llist); } else { __irq_work_queue_local(work); } out: preempt_enable(); return true; #endif /* CONFIG_SMP */ } bool irq_work_needs_cpu(void) { struct llist_head *raised, *lazy; raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); if (llist_empty(raised) || arch_irq_work_has_interrupt()) if (llist_empty(lazy)) return false; /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); return true; } void irq_work_single(void *arg) { struct irq_work *work = arg; int flags; /* * Clear the PENDING bit, after this point the @work can be re-used. * The PENDING bit acts as a lock, and we own it, so we can clear it * without atomic ops. */ flags = atomic_read(&work->node.a_flags); flags &= ~IRQ_WORK_PENDING; atomic_set(&work->node.a_flags, flags); /* * See irq_work_claim(). 
*/ smp_mb(); lockdep_irq_work_enter(flags); work->func(work); lockdep_irq_work_exit(flags); /* * Clear the BUSY bit, if set, and return to the free state if no-one * else claimed it meanwhile. */ (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY); if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || !arch_irq_work_has_interrupt()) rcuwait_wake_up(&work->irqwait); } static void irq_work_run_list(struct llist_head *list) { struct irq_work *work, *tmp; struct llist_node *llnode; /* * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed * in a per-CPU thread in preemptible context. Only the items which are * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context. */ BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT)); if (llist_empty(list)) return; llnode = llist_del_all(list); llist_for_each_entry_safe(work, tmp, llnode, node.llist) irq_work_single(work); } /* * hotplug calls this through: * hotplug_cfd() -> flush_smp_call_function_queue() */ void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) irq_work_run_list(this_cpu_ptr(&lazy_list)); else wake_irq_workd(); } EXPORT_SYMBOL_GPL(irq_work_run); void irq_work_tick(void) { struct llist_head *raised = this_cpu_ptr(&raised_list); if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) irq_work_run_list(this_cpu_ptr(&lazy_list)); else wake_irq_workd(); } /* * Synchronize against the irq_work @entry, ensures the entry is not * currently in use. */ void irq_work_sync(struct irq_work *work) { lockdep_assert_irqs_enabled(); might_sleep(); if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || !arch_irq_work_has_interrupt()) { rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work), TASK_UNINTERRUPTIBLE); return; } while (irq_work_is_busy(work)) cpu_relax(); } EXPORT_SYMBOL_GPL(irq_work_sync); static void run_irq_workd(unsigned int cpu) { irq_work_run_list(this_cpu_ptr(&lazy_list)); } static void irq_workd_setup(unsigned int cpu) { sched_set_fifo_low(current); } static struct smp_hotplug_thread irqwork_threads = { .store = &irq_workd, .setup = irq_workd_setup, .thread_should_run = irq_workd_should_run, .thread_fn = run_irq_workd, .thread_comm = "irq_work/%u", }; static __init int irq_work_init_threads(void) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) BUG_ON(smpboot_register_percpu_thread(&irqwork_threads)); return 0; } early_initcall(irq_work_init_threads);
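From a client's point of view the API above is: initialize an irq_work with a callback, call irq_work_queue() (or irq_work_queue_on()) from a context that cannot sleep, and call irq_work_sync() before the item can be freed. A hedged sketch of such a user follows; IRQ_WORK_INIT() and the callback prototype are taken from <linux/irq_work.h> as commonly used, and every my_* name is an illustrative assumption.

/*
 * Hypothetical user of the irq_work API above.  Everything named my_*
 * is made up for illustration and is not kernel code.
 */
#include <linux/irq_work.h>
#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t my_event_count = ATOMIC_INIT(0);

/* Runs in hard-IRQ context (or the per-CPU irq_work thread on PREEMPT_RT). */
static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("deferred work ran, %d events so far\n",
		atomic_read(&my_event_count));
}

static struct irq_work my_irq_work = IRQ_WORK_INIT(my_irq_work_fn);

/* Called from NMI or another no-sleep context: record the event and kick. */
void my_record_event(void)
{
	atomic_inc(&my_event_count);
	/* Returns false if the work was already pending; that is fine,
	 * the single callback run will observe the updated counter. */
	irq_work_queue(&my_irq_work);
}

/* Teardown: make sure the callback is no longer running or queued. */
void my_teardown(void)
{
	irq_work_sync(&my_irq_work);
}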
// SPDX-License-Identifier: GPL-2.0 /* * fs/bfs/dir.c * BFS directory operations. * Copyright (C) 1999-2018 Tigran Aivazian <aivazian.tigran@gmail.com> * Made endianness-clean by Andrew Stribblehill <ads@wompom.org> 2005 */ #include <linux/time.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/sched.h> #include "bfs.h" #undef DEBUG #ifdef DEBUG #define dprintf(x...) printf(x) #else #define dprintf(x...)
#endif static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino); static struct buffer_head *bfs_find_entry(struct inode *dir, const struct qstr *child, struct bfs_dirent **res_dir); static int bfs_readdir(struct file *f, struct dir_context *ctx) { struct inode *dir = file_inode(f); struct buffer_head *bh; struct bfs_dirent *de; unsigned int offset; int block; if (ctx->pos & (BFS_DIRENT_SIZE - 1)) { printf("Bad f_pos=%08lx for %s:%08lx\n", (unsigned long)ctx->pos, dir->i_sb->s_id, dir->i_ino); return -EINVAL; } while (ctx->pos < dir->i_size) { offset = ctx->pos & (BFS_BSIZE - 1); block = BFS_I(dir)->i_sblock + (ctx->pos >> BFS_BSIZE_BITS); bh = sb_bread(dir->i_sb, block); if (!bh) { ctx->pos += BFS_BSIZE - offset; continue; } do { de = (struct bfs_dirent *)(bh->b_data + offset); if (de->ino) { int size = strnlen(de->name, BFS_NAMELEN); if (!dir_emit(ctx, de->name, size, le16_to_cpu(de->ino), DT_UNKNOWN)) { brelse(bh); return 0; } } offset += BFS_DIRENT_SIZE; ctx->pos += BFS_DIRENT_SIZE; } while ((offset < BFS_BSIZE) && (ctx->pos < dir->i_size)); brelse(bh); } return 0; } const struct file_operations bfs_dir_operations = { .read = generic_read_dir, .iterate_shared = bfs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; static int bfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { int err; struct inode *inode; struct super_block *s = dir->i_sb; struct bfs_sb_info *info = BFS_SB(s); unsigned long ino; inode = new_inode(s); if (!inode) return -ENOMEM; mutex_lock(&info->bfs_lock); ino = find_first_zero_bit(info->si_imap, info->si_lasti + 1); if (ino > info->si_lasti) { mutex_unlock(&info->bfs_lock); iput(inode); return -ENOSPC; } set_bit(ino, info->si_imap); info->si_freei--; inode_init_owner(&nop_mnt_idmap, inode, dir, mode); simple_inode_init_ts(inode); inode->i_blocks = 0; inode->i_op = &bfs_file_inops; inode->i_fop = &bfs_file_operations; inode->i_mapping->a_ops = &bfs_aops; inode->i_ino = ino; BFS_I(inode)->i_dsk_ino = ino; BFS_I(inode)->i_sblock = 0; BFS_I(inode)->i_eblock = 0; insert_inode_hash(inode); mark_inode_dirty(inode); bfs_dump_imap("create", s); err = bfs_add_entry(dir, &dentry->d_name, inode->i_ino); if (err) { inode_dec_link_count(inode); mutex_unlock(&info->bfs_lock); iput(inode); return err; } mutex_unlock(&info->bfs_lock); d_instantiate(dentry, inode); return 0; } static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct buffer_head *bh; struct bfs_dirent *de; struct bfs_sb_info *info = BFS_SB(dir->i_sb); if (dentry->d_name.len > BFS_NAMELEN) return ERR_PTR(-ENAMETOOLONG); mutex_lock(&info->bfs_lock); bh = bfs_find_entry(dir, &dentry->d_name, &de); if (bh) { unsigned long ino = (unsigned long)le16_to_cpu(de->ino); brelse(bh); inode = bfs_iget(dir->i_sb, ino); } mutex_unlock(&info->bfs_lock); return d_splice_alias(inode, dentry); } static int bfs_link(struct dentry *old, struct inode *dir, struct dentry *new) { struct inode *inode = d_inode(old); struct bfs_sb_info *info = BFS_SB(inode->i_sb); int err; mutex_lock(&info->bfs_lock); err = bfs_add_entry(dir, &new->d_name, inode->i_ino); if (err) { mutex_unlock(&info->bfs_lock); return err; } inc_nlink(inode); inode_set_ctime_current(inode); mark_inode_dirty(inode); ihold(inode); d_instantiate(new, inode); mutex_unlock(&info->bfs_lock); return 0; } static int bfs_unlink(struct inode *dir, struct dentry *dentry) { int error = -ENOENT; struct inode *inode = 
d_inode(dentry); struct buffer_head *bh; struct bfs_dirent *de; struct bfs_sb_info *info = BFS_SB(inode->i_sb); mutex_lock(&info->bfs_lock); bh = bfs_find_entry(dir, &dentry->d_name, &de); if (!bh || (le16_to_cpu(de->ino) != inode->i_ino)) goto out_brelse; if (!inode->i_nlink) { printf("unlinking non-existent file %s:%lu (nlink=%d)\n", inode->i_sb->s_id, inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } de->ino = 0; mark_buffer_dirty_inode(bh, dir); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); mark_inode_dirty(dir); inode_set_ctime_to_ts(inode, inode_get_ctime(dir)); inode_dec_link_count(inode); error = 0; out_brelse: brelse(bh); mutex_unlock(&info->bfs_lock); return error; } static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct inode *old_inode, *new_inode; struct buffer_head *old_bh, *new_bh; struct bfs_dirent *old_de, *new_de; struct bfs_sb_info *info; int error = -ENOENT; if (flags & ~RENAME_NOREPLACE) return -EINVAL; old_bh = new_bh = NULL; old_inode = d_inode(old_dentry); if (S_ISDIR(old_inode->i_mode)) return -EINVAL; info = BFS_SB(old_inode->i_sb); mutex_lock(&info->bfs_lock); old_bh = bfs_find_entry(old_dir, &old_dentry->d_name, &old_de); if (!old_bh || (le16_to_cpu(old_de->ino) != old_inode->i_ino)) goto end_rename; error = -EPERM; new_inode = d_inode(new_dentry); new_bh = bfs_find_entry(new_dir, &new_dentry->d_name, &new_de); if (new_bh && !new_inode) { brelse(new_bh); new_bh = NULL; } if (!new_bh) { error = bfs_add_entry(new_dir, &new_dentry->d_name, old_inode->i_ino); if (error) goto end_rename; } old_de->ino = 0; inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir)); mark_inode_dirty(old_dir); if (new_inode) { inode_set_ctime_current(new_inode); inode_dec_link_count(new_inode); } mark_buffer_dirty_inode(old_bh, old_dir); error = 0; end_rename: mutex_unlock(&info->bfs_lock); brelse(old_bh); brelse(new_bh); return error; } const struct inode_operations bfs_dir_inops = { .create = bfs_create, .lookup = bfs_lookup, .link = bfs_link, .unlink = bfs_unlink, .rename = bfs_rename, }; static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino) { const unsigned char *name = child->name; int namelen = child->len; struct buffer_head *bh; struct bfs_dirent *de; int block, sblock, eblock, off, pos; int i; dprintf("name=%s, namelen=%d\n", name, namelen); sblock = BFS_I(dir)->i_sblock; eblock = BFS_I(dir)->i_eblock; for (block = sblock; block <= eblock; block++) { bh = sb_bread(dir->i_sb, block); if (!bh) return -EIO; for (off = 0; off < BFS_BSIZE; off += BFS_DIRENT_SIZE) { de = (struct bfs_dirent *)(bh->b_data + off); if (!de->ino) { pos = (block - sblock) * BFS_BSIZE + off; if (pos >= dir->i_size) { dir->i_size += BFS_DIRENT_SIZE; inode_set_ctime_current(dir); } inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); mark_inode_dirty(dir); de->ino = cpu_to_le16((u16)ino); for (i = 0; i < BFS_NAMELEN; i++) de->name[i] = (i < namelen) ? 
name[i] : 0; mark_buffer_dirty_inode(bh, dir); brelse(bh); return 0; } } brelse(bh); } return -ENOSPC; } static inline int bfs_namecmp(int len, const unsigned char *name, const char *buffer) { if ((len < BFS_NAMELEN) && buffer[len]) return 0; return !memcmp(name, buffer, len); } static struct buffer_head *bfs_find_entry(struct inode *dir, const struct qstr *child, struct bfs_dirent **res_dir) { unsigned long block = 0, offset = 0; struct buffer_head *bh = NULL; struct bfs_dirent *de; const unsigned char *name = child->name; int namelen = child->len; *res_dir = NULL; if (namelen > BFS_NAMELEN) return NULL; while (block * BFS_BSIZE + offset < dir->i_size) { if (!bh) { bh = sb_bread(dir->i_sb, BFS_I(dir)->i_sblock + block); if (!bh) { block++; continue; } } de = (struct bfs_dirent *)(bh->b_data + offset); offset += BFS_DIRENT_SIZE; if (le16_to_cpu(de->ino) && bfs_namecmp(namelen, name, de->name)) { *res_dir = de; return bh; } if (offset < bh->b_size) continue; brelse(bh); bh = NULL; offset = 0; block++; } brelse(bh); return NULL; }
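The directory format walked by bfs_add_entry() and bfs_find_entry() is simply an array of fixed-size records: a little-endian 16-bit inode number (zero meaning the slot is free) followed by a NUL-padded name. The userspace sketch below scans one block the same way; the 512-byte block, 14-byte name and 16-byte dirent sizes are assumed to match the bfs headers, which are not part of this excerpt.

/*
 * Userspace sketch of a BFS directory-block scan, mirroring the
 * bfs_find_entry()/bfs_namecmp() logic above.  Block, name and dirent
 * sizes are assumptions about the bfs on-disk format.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE       512
#define NAMELEN     14
#define DIRENT_SIZE 16

struct dirent16 {
	uint16_t ino;		/* 0 means "slot is free" */
	char name[NAMELEN];	/* NUL-padded, not necessarily NUL-terminated */
};

/* Same rule as bfs_namecmp(): a shorter on-disk name must be NUL-padded. */
static int namecmp(size_t len, const char *name, const char *buf)
{
	if (len < NAMELEN && buf[len])
		return 0;
	return !memcmp(name, buf, len);
}

static unsigned int find_entry(const unsigned char *block, const char *name)
{
	for (unsigned int off = 0; off + DIRENT_SIZE <= BSIZE; off += DIRENT_SIZE) {
		const struct dirent16 *de = (const void *)(block + off);

		if (de->ino && namecmp(strlen(name), name, de->name))
			return de->ino;	/* on disk this field is little-endian */
	}
	return 0;
}

int main(void)
{
	unsigned char block[BSIZE] = { 0 };
	struct dirent16 de = { .ino = 7 };

	strncpy(de.name, "hello", NAMELEN);
	memcpy(block + DIRENT_SIZE, &de, sizeof(de));	/* second slot in use */

	printf("ino of \"hello\" = %u\n", find_entry(block, "hello"));
	return 0;
}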
/* FUSE: Filesystem in Userspace Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> This program can be distributed under the terms of the GNU GPL. See the file COPYING. */ #include "fuse_i.h" #include <linux/pagemap.h> #include <linux/file.h> #include <linux/fs_context.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/iversion.h> #include <linux/posix_acl.h> #include <linux/security.h> #include <linux/types.h> #include <linux/kernel.h> static bool __read_mostly allow_sys_admin_access; module_param(allow_sys_admin_access, bool, 0644); MODULE_PARM_DESC(allow_sys_admin_access, "Allow users with CAP_SYS_ADMIN in initial userns to bypass allow_other access check"); static void fuse_advise_use_readdirplus(struct inode *dir) { struct fuse_inode *fi = get_fuse_inode(dir); set_bit(FUSE_I_ADVISE_RDPLUS, &fi->state); } #if BITS_PER_LONG >= 64 static inline void __fuse_dentry_settime(struct dentry *entry, u64 time) { entry->d_fsdata = (void *) time; } static inline u64 fuse_dentry_time(const struct dentry *entry) { return (u64)entry->d_fsdata; } #else union fuse_dentry { u64 time; struct rcu_head rcu; }; static inline void __fuse_dentry_settime(struct dentry *dentry, u64 time) { ((union fuse_dentry *) dentry->d_fsdata)->time = time; } static inline u64 fuse_dentry_time(const struct dentry *entry) { return ((union fuse_dentry *) entry->d_fsdata)->time; } #endif static void fuse_dentry_settime(struct dentry *dentry, u64 time) { struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb); bool delete = !time && fc->delete_stale; /* * Mess with DCACHE_OP_DELETE because dput() will be faster without it. * Don't care about races, either way it's just an optimization */ if ((!delete && (dentry->d_flags & DCACHE_OP_DELETE)) || (delete && !(dentry->d_flags & DCACHE_OP_DELETE))) { spin_lock(&dentry->d_lock); if (!delete) dentry->d_flags &= ~DCACHE_OP_DELETE; else dentry->d_flags |= DCACHE_OP_DELETE; spin_unlock(&dentry->d_lock); } __fuse_dentry_settime(dentry, time); } /* * FUSE caches dentries and attributes with separate timeout. The * time in jiffies until the dentry/attributes are valid is stored in * dentry->d_fsdata and fuse_inode->i_time respectively.
*/ /* * Calculate the time in jiffies until a dentry/attributes are valid */ u64 fuse_time_to_jiffies(u64 sec, u32 nsec) { if (sec || nsec) { struct timespec64 ts = { sec, min_t(u32, nsec, NSEC_PER_SEC - 1) }; return get_jiffies_64() + timespec64_to_jiffies(&ts); } else return 0; } /* * Set dentry and possibly attribute timeouts from the lookup/mk* * replies */ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o) { fuse_dentry_settime(entry, fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec)); } void fuse_invalidate_attr_mask(struct inode *inode, u32 mask) { set_mask_bits(&get_fuse_inode(inode)->inval_mask, 0, mask); } /* * Mark the attributes as stale, so that at the next call to * ->getattr() they will be fetched from userspace */ void fuse_invalidate_attr(struct inode *inode) { fuse_invalidate_attr_mask(inode, STATX_BASIC_STATS); } static void fuse_dir_changed(struct inode *dir) { fuse_invalidate_attr(dir); inode_maybe_inc_iversion(dir, false); } /* * Mark the attributes as stale due to an atime change. Avoid the invalidate if * atime is not used. */ void fuse_invalidate_atime(struct inode *inode) { if (!IS_RDONLY(inode)) fuse_invalidate_attr_mask(inode, STATX_ATIME); } /* * Just mark the entry as stale, so that a next attempt to look it up * will result in a new lookup call to userspace * * This is called when a dentry is about to become negative and the * timeout is unknown (unlink, rmdir, rename and in some cases * lookup) */ void fuse_invalidate_entry_cache(struct dentry *entry) { fuse_dentry_settime(entry, 0); } /* * Same as fuse_invalidate_entry_cache(), but also try to remove the * dentry from the hash */ static void fuse_invalidate_entry(struct dentry *entry) { d_invalidate(entry); fuse_invalidate_entry_cache(entry); } static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args, u64 nodeid, const struct qstr *name, struct fuse_entry_out *outarg) { memset(outarg, 0, sizeof(struct fuse_entry_out)); args->opcode = FUSE_LOOKUP; args->nodeid = nodeid; args->in_numargs = 1; args->in_args[0].size = name->len + 1; args->in_args[0].value = name->name; args->out_numargs = 1; args->out_args[0].size = sizeof(struct fuse_entry_out); args->out_args[0].value = outarg; } /* * Check whether the dentry is still valid * * If the entry validity timeout has expired and the dentry is * positive, try to redo the lookup. If the lookup results in a * different inode, then let the VFS invalidate the dentry and redo * the lookup once more. If the lookup results in the same inode, * then refresh the attributes, timeouts and mark the dentry valid. 
*/ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) { struct inode *inode; struct dentry *parent; struct fuse_mount *fm; struct fuse_inode *fi; int ret; inode = d_inode_rcu(entry); if (inode && fuse_is_bad(inode)) goto invalid; else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) || (flags & (LOOKUP_EXCL | LOOKUP_REVAL | LOOKUP_RENAME_TARGET))) { struct fuse_entry_out outarg; FUSE_ARGS(args); struct fuse_forget_link *forget; u64 attr_version; /* For negative dentries, always do a fresh lookup */ if (!inode) goto invalid; ret = -ECHILD; if (flags & LOOKUP_RCU) goto out; fm = get_fuse_mount(inode); forget = fuse_alloc_forget(); ret = -ENOMEM; if (!forget) goto out; attr_version = fuse_get_attr_version(fm->fc); parent = dget_parent(entry); fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)), &entry->d_name, &outarg); ret = fuse_simple_request(fm, &args); dput(parent); /* Zero nodeid is same as -ENOENT */ if (!ret && !outarg.nodeid) ret = -ENOENT; if (!ret) { fi = get_fuse_inode(inode); if (outarg.nodeid != get_node_id(inode) || (bool) IS_AUTOMOUNT(inode) != (bool) (outarg.attr.flags & FUSE_ATTR_SUBMOUNT)) { fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1); goto invalid; } spin_lock(&fi->lock); fi->nlookup++; spin_unlock(&fi->lock); } kfree(forget); if (ret == -ENOMEM || ret == -EINTR) goto out; if (ret || fuse_invalid_attr(&outarg.attr) || fuse_stale_inode(inode, outarg.generation, &outarg.attr)) goto invalid; forget_all_cached_acls(inode); fuse_change_attributes(inode, &outarg.attr, NULL, ATTR_TIMEOUT(&outarg), attr_version); fuse_change_entry_timeout(entry, &outarg); } else if (inode) { fi = get_fuse_inode(inode); if (flags & LOOKUP_RCU) { if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state)) return -ECHILD; } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) { parent = dget_parent(entry); fuse_advise_use_readdirplus(d_inode(parent)); dput(parent); } } ret = 1; out: return ret; invalid: ret = 0; goto out; } #if BITS_PER_LONG < 64 static int fuse_dentry_init(struct dentry *dentry) { dentry->d_fsdata = kzalloc(sizeof(union fuse_dentry), GFP_KERNEL_ACCOUNT | __GFP_RECLAIMABLE); return dentry->d_fsdata ? 0 : -ENOMEM; } static void fuse_dentry_release(struct dentry *dentry) { union fuse_dentry *fd = dentry->d_fsdata; kfree_rcu(fd, rcu); } #endif static int fuse_dentry_delete(const struct dentry *dentry) { return time_before64(fuse_dentry_time(dentry), get_jiffies_64()); } /* * Create a fuse_mount object with a new superblock (with path->dentry * as the root), and return that mount so it can be auto-mounted on * @path. 
*/ static struct vfsmount *fuse_dentry_automount(struct path *path) { struct fs_context *fsc; struct vfsmount *mnt; struct fuse_inode *mp_fi = get_fuse_inode(d_inode(path->dentry)); fsc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry); if (IS_ERR(fsc)) return ERR_CAST(fsc); /* Pass the FUSE inode of the mount for fuse_get_tree_submount() */ fsc->fs_private = mp_fi; /* Create the submount */ mnt = fc_mount(fsc); if (!IS_ERR(mnt)) mntget(mnt); put_fs_context(fsc); return mnt; } const struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, .d_delete = fuse_dentry_delete, #if BITS_PER_LONG < 64 .d_init = fuse_dentry_init, .d_release = fuse_dentry_release, #endif .d_automount = fuse_dentry_automount, }; const struct dentry_operations fuse_root_dentry_operations = { #if BITS_PER_LONG < 64 .d_init = fuse_dentry_init, .d_release = fuse_dentry_release, #endif }; int fuse_valid_type(int m) { return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); } static bool fuse_valid_size(u64 size) { return size <= LLONG_MAX; } bool fuse_invalid_attr(struct fuse_attr *attr) { return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size); } int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name, struct fuse_entry_out *outarg, struct inode **inode) { struct fuse_mount *fm = get_fuse_mount_super(sb); FUSE_ARGS(args); struct fuse_forget_link *forget; u64 attr_version, evict_ctr; int err; *inode = NULL; err = -ENAMETOOLONG; if (name->len > FUSE_NAME_MAX) goto out; forget = fuse_alloc_forget(); err = -ENOMEM; if (!forget) goto out; attr_version = fuse_get_attr_version(fm->fc); evict_ctr = fuse_get_evict_ctr(fm->fc); fuse_lookup_init(fm->fc, &args, nodeid, name, outarg); err = fuse_simple_request(fm, &args); /* Zero nodeid is same as -ENOENT, but with valid timeout */ if (err || !outarg->nodeid) goto out_put_forget; err = -EIO; if (fuse_invalid_attr(&outarg->attr)) goto out_put_forget; if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) { pr_warn_once("root generation should be zero\n"); outarg->generation = 0; } *inode = fuse_iget(sb, outarg->nodeid, outarg->generation, &outarg->attr, ATTR_TIMEOUT(outarg), attr_version, evict_ctr); err = -ENOMEM; if (!*inode) { fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1); goto out; } err = 0; out_put_forget: kfree(forget); out: return err; } static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, unsigned int flags) { int err; struct fuse_entry_out outarg; struct inode *inode; struct dentry *newent; bool outarg_valid = true; bool locked; if (fuse_is_bad(dir)) return ERR_PTR(-EIO); locked = fuse_lock_inode(dir); err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name, &outarg, &inode); fuse_unlock_inode(dir, locked); if (err == -ENOENT) { outarg_valid = false; err = 0; } if (err) goto out_err; err = -EIO; if (inode && get_node_id(inode) == FUSE_ROOT_ID) goto out_iput; newent = d_splice_alias(inode, entry); err = PTR_ERR(newent); if (IS_ERR(newent)) goto out_err; entry = newent ? 
newent : entry; if (outarg_valid) fuse_change_entry_timeout(entry, &outarg); else fuse_invalidate_entry_cache(entry); if (inode) fuse_advise_use_readdirplus(dir); return newent; out_iput: iput(inode); out_err: return ERR_PTR(err); } static int get_security_context(struct dentry *entry, umode_t mode, struct fuse_in_arg *ext) { struct fuse_secctx *fctx; struct fuse_secctx_header *header; void *ctx = NULL, *ptr; u32 ctxlen, total_len = sizeof(*header); int err, nr_ctx = 0; const char *name; size_t namelen; err = security_dentry_init_security(entry, mode, &entry->d_name, &name, &ctx, &ctxlen); if (err) { if (err != -EOPNOTSUPP) goto out_err; /* No LSM is supporting this security hook. Ignore error */ ctxlen = 0; ctx = NULL; } if (ctxlen) { nr_ctx = 1; namelen = strlen(name) + 1; err = -EIO; if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX)) goto out_err; total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen); } err = -ENOMEM; header = ptr = kzalloc(total_len, GFP_KERNEL); if (!ptr) goto out_err; header->nr_secctx = nr_ctx; header->size = total_len; ptr += sizeof(*header); if (nr_ctx) { fctx = ptr; fctx->size = ctxlen; ptr += sizeof(*fctx); strcpy(ptr, name); ptr += namelen; memcpy(ptr, ctx, ctxlen); } ext->size = total_len; ext->value = header; err = 0; out_err: kfree(ctx); return err; } static void *extend_arg(struct fuse_in_arg *buf, u32 bytes) { void *p; u32 newlen = buf->size + bytes; p = krealloc(buf->value, newlen, GFP_KERNEL); if (!p) { kfree(buf->value); buf->size = 0; buf->value = NULL; return NULL; } memset(p + buf->size, 0, bytes); buf->value = p; buf->size = newlen; return p + newlen - bytes; } static u32 fuse_ext_size(size_t size) { return FUSE_REC_ALIGN(sizeof(struct fuse_ext_header) + size); } /* * This adds just a single supplementary group that matches the parent's group. */ static int get_create_supp_group(struct mnt_idmap *idmap, struct inode *dir, struct fuse_in_arg *ext) { struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_ext_header *xh; struct fuse_supp_groups *sg; kgid_t kgid = dir->i_gid; vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, kgid); gid_t parent_gid = from_kgid(fc->user_ns, kgid); u32 sg_len = fuse_ext_size(sizeof(*sg) + sizeof(sg->groups[0])); if (parent_gid == (gid_t) -1 || vfsgid_eq_kgid(vfsgid, current_fsgid()) || !vfsgid_in_group_p(vfsgid)) return 0; xh = extend_arg(ext, sg_len); if (!xh) return -ENOMEM; xh->size = sg_len; xh->type = FUSE_EXT_GROUPS; sg = (struct fuse_supp_groups *) &xh[1]; sg->nr_groups = 1; sg->groups[0] = parent_gid; return 0; } static int get_create_ext(struct mnt_idmap *idmap, struct fuse_args *args, struct inode *dir, struct dentry *dentry, umode_t mode) { struct fuse_conn *fc = get_fuse_conn_super(dentry->d_sb); struct fuse_in_arg ext = { .size = 0, .value = NULL }; int err = 0; if (fc->init_security) err = get_security_context(dentry, mode, &ext); if (!err && fc->create_supp_group) err = get_create_supp_group(idmap, dir, &ext); if (!err && ext.size) { WARN_ON(args->in_numargs >= ARRAY_SIZE(args->in_args)); args->is_ext = true; args->ext_idx = args->in_numargs++; args->in_args[args->ext_idx] = ext; } else { kfree(ext.value); } return err; } static void free_ext_value(struct fuse_args *args) { if (args->is_ext) kfree(args->in_args[args->ext_idx].value); } /* * Atomic create+open operation * * If the filesystem doesn't support this, then fall back to separate * 'mknod' + 'open' requests. 
*/ static int fuse_create_open(struct mnt_idmap *idmap, struct inode *dir, struct dentry *entry, struct file *file, unsigned int flags, umode_t mode, u32 opcode) { int err; struct inode *inode; struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); struct fuse_forget_link *forget; struct fuse_create_in inarg; struct fuse_open_out *outopenp; struct fuse_entry_out outentry; struct fuse_inode *fi; struct fuse_file *ff; bool trunc = flags & O_TRUNC; /* Userspace expects S_IFREG in create mode */ BUG_ON((mode & S_IFMT) != S_IFREG); forget = fuse_alloc_forget(); err = -ENOMEM; if (!forget) goto out_err; err = -ENOMEM; ff = fuse_file_alloc(fm, true); if (!ff) goto out_put_forget_req; if (!fm->fc->dont_mask) mode &= ~current_umask(); flags &= ~O_NOCTTY; memset(&inarg, 0, sizeof(inarg)); memset(&outentry, 0, sizeof(outentry)); inarg.flags = flags; inarg.mode = mode; inarg.umask = current_umask(); if (fm->fc->handle_killpriv_v2 && trunc && !(flags & O_EXCL) && !capable(CAP_FSETID)) { inarg.open_flags |= FUSE_OPEN_KILL_SUIDGID; } args.opcode = opcode; args.nodeid = get_node_id(dir); args.in_numargs = 2; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.in_args[1].size = entry->d_name.len + 1; args.in_args[1].value = entry->d_name.name; args.out_numargs = 2; args.out_args[0].size = sizeof(outentry); args.out_args[0].value = &outentry; /* Store outarg for fuse_finish_open() */ outopenp = &ff->args->open_outarg; args.out_args[1].size = sizeof(*outopenp); args.out_args[1].value = outopenp; err = get_create_ext(idmap, &args, dir, entry, mode); if (err) goto out_free_ff; err = fuse_simple_idmap_request(idmap, fm, &args); free_ext_value(&args); if (err) goto out_free_ff; err = -EIO; if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) || fuse_invalid_attr(&outentry.attr)) goto out_free_ff; ff->fh = outopenp->fh; ff->nodeid = outentry.nodeid; ff->open_flags = outopenp->open_flags; inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, &outentry.attr, ATTR_TIMEOUT(&outentry), 0, 0); if (!inode) { flags &= ~(O_CREAT | O_EXCL | O_TRUNC); fuse_sync_release(NULL, ff, flags); fuse_queue_forget(fm->fc, forget, outentry.nodeid, 1); err = -ENOMEM; goto out_err; } kfree(forget); d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_dir_changed(dir); err = generic_file_open(inode, file); if (!err) { file->private_data = ff; err = finish_open(file, entry, fuse_finish_open); } if (err) { fi = get_fuse_inode(inode); fuse_sync_release(fi, ff, flags); } else { if (fm->fc->atomic_o_trunc && trunc) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); } return err; out_free_ff: fuse_file_free(ff); out_put_forget_req: kfree(forget); out_err: return err; } static int fuse_mknod(struct mnt_idmap *, struct inode *, struct dentry *, umode_t, dev_t); static int fuse_atomic_open(struct inode *dir, struct dentry *entry, struct file *file, unsigned flags, umode_t mode) { int err; struct mnt_idmap *idmap = file_mnt_idmap(file); struct fuse_conn *fc = get_fuse_conn(dir); struct dentry *res = NULL; if (fuse_is_bad(dir)) return -EIO; if (d_in_lookup(entry)) { res = fuse_lookup(dir, entry, 0); if (IS_ERR(res)) return PTR_ERR(res); if (res) entry = res; } if (!(flags & O_CREAT) || d_really_is_positive(entry)) goto no_open; /* Only creates */ file->f_mode |= FMODE_CREATED; if (fc->no_create) goto mknod; err = fuse_create_open(idmap, dir, entry, file, flags, mode, FUSE_CREATE); if (err == 
-ENOSYS) { fc->no_create = 1; goto mknod; } else if (err == -EEXIST) fuse_invalidate_entry(entry); out_dput: dput(res); return err; mknod: err = fuse_mknod(idmap, dir, entry, mode, 0); if (err) goto out_dput; no_open: return finish_no_open(file, res); } /* * Code shared between mknod, mkdir, symlink and link */ static int create_new_entry(struct mnt_idmap *idmap, struct fuse_mount *fm, struct fuse_args *args, struct inode *dir, struct dentry *entry, umode_t mode) { struct fuse_entry_out outarg; struct inode *inode; struct dentry *d; int err; struct fuse_forget_link *forget; if (fuse_is_bad(dir)) return -EIO; forget = fuse_alloc_forget(); if (!forget) return -ENOMEM; memset(&outarg, 0, sizeof(outarg)); args->nodeid = get_node_id(dir); args->out_numargs = 1; args->out_args[0].size = sizeof(outarg); args->out_args[0].value = &outarg; if (args->opcode != FUSE_LINK) { err = get_create_ext(idmap, args, dir, entry, mode); if (err) goto out_put_forget_req; } err = fuse_simple_idmap_request(idmap, fm, args); free_ext_value(args); if (err) goto out_put_forget_req; err = -EIO; if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr)) goto out_put_forget_req; if ((outarg.attr.mode ^ mode) & S_IFMT) goto out_put_forget_req; inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr, ATTR_TIMEOUT(&outarg), 0, 0); if (!inode) { fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1); return -ENOMEM; } kfree(forget); d_drop(entry); d = d_splice_alias(inode, entry); if (IS_ERR(d)) return PTR_ERR(d); if (d) { fuse_change_entry_timeout(d, &outarg); dput(d); } else { fuse_change_entry_timeout(entry, &outarg); } fuse_dir_changed(dir); return 0; out_put_forget_req: if (err == -EEXIST) fuse_invalidate_entry(entry); kfree(forget); return err; } static int fuse_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *entry, umode_t mode, dev_t rdev) { struct fuse_mknod_in inarg; struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); if (!fm->fc->dont_mask) mode &= ~current_umask(); memset(&inarg, 0, sizeof(inarg)); inarg.mode = mode; inarg.rdev = new_encode_dev(rdev); inarg.umask = current_umask(); args.opcode = FUSE_MKNOD; args.in_numargs = 2; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.in_args[1].size = entry->d_name.len + 1; args.in_args[1].value = entry->d_name.name; return create_new_entry(idmap, fm, &args, dir, entry, mode); } static int fuse_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *entry, umode_t mode, bool excl) { return fuse_mknod(idmap, dir, entry, mode, 0); } static int fuse_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct fuse_conn *fc = get_fuse_conn(dir); int err; if (fc->no_tmpfile) return -EOPNOTSUPP; err = fuse_create_open(idmap, dir, file->f_path.dentry, file, file->f_flags, mode, FUSE_TMPFILE); if (err == -ENOSYS) { fc->no_tmpfile = 1; err = -EOPNOTSUPP; } return err; } static int fuse_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *entry, umode_t mode) { struct fuse_mkdir_in inarg; struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); if (!fm->fc->dont_mask) mode &= ~current_umask(); memset(&inarg, 0, sizeof(inarg)); inarg.mode = mode; inarg.umask = current_umask(); args.opcode = FUSE_MKDIR; args.in_numargs = 2; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.in_args[1].size = entry->d_name.len + 1; args.in_args[1].value = entry->d_name.name; return create_new_entry(idmap, fm, &args, dir, entry, S_IFDIR); 
} static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *entry, const char *link) { struct fuse_mount *fm = get_fuse_mount(dir); unsigned len = strlen(link) + 1; FUSE_ARGS(args); args.opcode = FUSE_SYMLINK; args.in_numargs = 2; args.in_args[0].size = entry->d_name.len + 1; args.in_args[0].value = entry->d_name.name; args.in_args[1].size = len; args.in_args[1].value = link; return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK); } void fuse_flush_time_update(struct inode *inode) { int err = sync_inode_metadata(inode, 1); mapping_set_error(inode->i_mapping, err); } static void fuse_update_ctime_in_cache(struct inode *inode) { if (!IS_NOCMTIME(inode)) { inode_set_ctime_current(inode); mark_inode_dirty_sync(inode); fuse_flush_time_update(inode); } } void fuse_update_ctime(struct inode *inode) { fuse_invalidate_attr_mask(inode, STATX_CTIME); fuse_update_ctime_in_cache(inode); } static void fuse_entry_unlinked(struct dentry *entry) { struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fi->lock); fi->attr_version = atomic64_inc_return(&fc->attr_version); /* * If i_nlink == 0 then unlink doesn't make sense, yet this can * happen if userspace filesystem is careless. It would be * difficult to enforce correct nlink usage so just ignore this * condition here */ if (S_ISDIR(inode->i_mode)) clear_nlink(inode); else if (inode->i_nlink > 0) drop_nlink(inode); spin_unlock(&fi->lock); fuse_invalidate_entry_cache(entry); fuse_update_ctime(inode); } static int fuse_unlink(struct inode *dir, struct dentry *entry) { int err; struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); if (fuse_is_bad(dir)) return -EIO; args.opcode = FUSE_UNLINK; args.nodeid = get_node_id(dir); args.in_numargs = 1; args.in_args[0].size = entry->d_name.len + 1; args.in_args[0].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); fuse_entry_unlinked(entry); } else if (err == -EINTR || err == -ENOENT) fuse_invalidate_entry(entry); return err; } static int fuse_rmdir(struct inode *dir, struct dentry *entry) { int err; struct fuse_mount *fm = get_fuse_mount(dir); FUSE_ARGS(args); if (fuse_is_bad(dir)) return -EIO; args.opcode = FUSE_RMDIR; args.nodeid = get_node_id(dir); args.in_numargs = 1; args.in_args[0].size = entry->d_name.len + 1; args.in_args[0].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); fuse_entry_unlinked(entry); } else if (err == -EINTR || err == -ENOENT) fuse_invalidate_entry(entry); return err; } static int fuse_rename_common(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent, struct inode *newdir, struct dentry *newent, unsigned int flags, int opcode, size_t argsize) { int err; struct fuse_rename2_in inarg; struct fuse_mount *fm = get_fuse_mount(olddir); FUSE_ARGS(args); memset(&inarg, 0, argsize); inarg.newdir = get_node_id(newdir); inarg.flags = flags; args.opcode = opcode; args.nodeid = get_node_id(olddir); args.in_numargs = 3; args.in_args[0].size = argsize; args.in_args[0].value = &inarg; args.in_args[1].size = oldent->d_name.len + 1; args.in_args[1].value = oldent->d_name.name; args.in_args[2].size = newent->d_name.len + 1; args.in_args[2].value = newent->d_name.name; err = fuse_simple_idmap_request(idmap, fm, &args); if (!err) { /* ctime changes */ fuse_update_ctime(d_inode(oldent)); if (flags & RENAME_EXCHANGE) fuse_update_ctime(d_inode(newent)); 
fuse_dir_changed(olddir); if (olddir != newdir) fuse_dir_changed(newdir); /* newent will end up negative */ if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent)) fuse_entry_unlinked(newent); } else if (err == -EINTR || err == -ENOENT) { /* If request was interrupted, DEITY only knows if the rename actually took place. If the invalidation fails (e.g. some process has CWD under the renamed directory), then there can be inconsistency between the dcache and the real filesystem. Tough luck. */ fuse_invalidate_entry(oldent); if (d_really_is_positive(newent)) fuse_invalidate_entry(newent); } return err; } static int fuse_rename2(struct mnt_idmap *idmap, struct inode *olddir, struct dentry *oldent, struct inode *newdir, struct dentry *newent, unsigned int flags) { struct fuse_conn *fc = get_fuse_conn(olddir); int err; if (fuse_is_bad(olddir)) return -EIO; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if (flags) { if (fc->no_rename2 || fc->minor < 23) return -EINVAL; err = fuse_rename_common((flags & RENAME_WHITEOUT) ? idmap : &invalid_mnt_idmap, olddir, oldent, newdir, newent, flags, FUSE_RENAME2, sizeof(struct fuse_rename2_in)); if (err == -ENOSYS) { fc->no_rename2 = 1; err = -EINVAL; } } else { err = fuse_rename_common(&invalid_mnt_idmap, olddir, oldent, newdir, newent, 0, FUSE_RENAME, sizeof(struct fuse_rename_in)); } return err; } static int fuse_link(struct dentry *entry, struct inode *newdir, struct dentry *newent) { int err; struct fuse_link_in inarg; struct inode *inode = d_inode(entry); struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); memset(&inarg, 0, sizeof(inarg)); inarg.oldnodeid = get_node_id(inode); args.opcode = FUSE_LINK; args.in_numargs = 2; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.in_args[1].size = newent->d_name.len + 1; args.in_args[1].value = newent->d_name.name; err = create_new_entry(&invalid_mnt_idmap, fm, &args, newdir, newent, inode->i_mode); if (!err) fuse_update_ctime_in_cache(inode); else if (err == -EINTR) fuse_invalidate_attr(inode); return err; } static void fuse_fillattr(struct mnt_idmap *idmap, struct inode *inode, struct fuse_attr *attr, struct kstat *stat) { unsigned int blkbits; struct fuse_conn *fc = get_fuse_conn(inode); vfsuid_t vfsuid = make_vfsuid(idmap, fc->user_ns, make_kuid(fc->user_ns, attr->uid)); vfsgid_t vfsgid = make_vfsgid(idmap, fc->user_ns, make_kgid(fc->user_ns, attr->gid)); stat->dev = inode->i_sb->s_dev; stat->ino = attr->ino; stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); stat->nlink = attr->nlink; stat->uid = vfsuid_into_kuid(vfsuid); stat->gid = vfsgid_into_kgid(vfsgid); stat->rdev = inode->i_rdev; stat->atime.tv_sec = attr->atime; stat->atime.tv_nsec = attr->atimensec; stat->mtime.tv_sec = attr->mtime; stat->mtime.tv_nsec = attr->mtimensec; stat->ctime.tv_sec = attr->ctime; stat->ctime.tv_nsec = attr->ctimensec; stat->size = attr->size; stat->blocks = attr->blocks; if (attr->blksize != 0) blkbits = ilog2(attr->blksize); else blkbits = inode->i_sb->s_blocksize_bits; stat->blksize = 1 << blkbits; } static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr) { memset(attr, 0, sizeof(*attr)); attr->ino = sx->ino; attr->size = sx->size; attr->blocks = sx->blocks; attr->atime = sx->atime.tv_sec; attr->mtime = sx->mtime.tv_sec; attr->ctime = sx->ctime.tv_sec; attr->atimensec = sx->atime.tv_nsec; attr->mtimensec = sx->mtime.tv_nsec; attr->ctimensec = sx->ctime.tv_nsec; attr->mode = sx->mode; attr->nlink = sx->nlink; 
attr->uid = sx->uid; attr->gid = sx->gid; attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor)); attr->blksize = sx->blksize; } static int fuse_do_statx(struct mnt_idmap *idmap, struct inode *inode, struct file *file, struct kstat *stat) { int err; struct fuse_attr attr; struct fuse_statx *sx; struct fuse_statx_in inarg; struct fuse_statx_out outarg; struct fuse_mount *fm = get_fuse_mount(inode); u64 attr_version = fuse_get_attr_version(fm->fc); FUSE_ARGS(args); memset(&inarg, 0, sizeof(inarg)); memset(&outarg, 0, sizeof(outarg)); /* Directories have separate file-handle space */ if (file && S_ISREG(inode->i_mode)) { struct fuse_file *ff = file->private_data; inarg.getattr_flags |= FUSE_GETATTR_FH; inarg.fh = ff->fh; } /* For now leave sync hints as the default, request all stats. */ inarg.sx_flags = 0; inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME; args.opcode = FUSE_STATX; args.nodeid = get_node_id(inode); args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.out_numargs = 1; args.out_args[0].size = sizeof(outarg); args.out_args[0].value = &outarg; err = fuse_simple_request(fm, &args); if (err) return err; sx = &outarg.stat; if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) || ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) || inode_wrong_type(inode, sx->mode)))) { fuse_make_bad(inode); return -EIO; } fuse_statx_to_attr(&outarg.stat, &attr); if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) { fuse_change_attributes(inode, &attr, &outarg.stat, ATTR_TIMEOUT(&outarg), attr_version); } if (stat) { stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME); stat->btime.tv_sec = sx->btime.tv_sec; stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1); fuse_fillattr(idmap, inode, &attr, stat); stat->result_mask |= STATX_TYPE; } return 0; } static int fuse_do_getattr(struct mnt_idmap *idmap, struct inode *inode, struct kstat *stat, struct file *file) { int err; struct fuse_getattr_in inarg; struct fuse_attr_out outarg; struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); u64 attr_version; attr_version = fuse_get_attr_version(fm->fc); memset(&inarg, 0, sizeof(inarg)); memset(&outarg, 0, sizeof(outarg)); /* Directories have separate file-handle space */ if (file && S_ISREG(inode->i_mode)) { struct fuse_file *ff = file->private_data; inarg.getattr_flags |= FUSE_GETATTR_FH; inarg.fh = ff->fh; } args.opcode = FUSE_GETATTR; args.nodeid = get_node_id(inode); args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.out_numargs = 1; args.out_args[0].size = sizeof(outarg); args.out_args[0].value = &outarg; err = fuse_simple_request(fm, &args); if (!err) { if (fuse_invalid_attr(&outarg.attr) || inode_wrong_type(inode, outarg.attr.mode)) { fuse_make_bad(inode); err = -EIO; } else { fuse_change_attributes(inode, &outarg.attr, NULL, ATTR_TIMEOUT(&outarg), attr_version); if (stat) fuse_fillattr(idmap, inode, &outarg.attr, stat); } } return err; } static int fuse_update_get_attr(struct mnt_idmap *idmap, struct inode *inode, struct file *file, struct kstat *stat, u32 request_mask, unsigned int flags) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); int err = 0; bool sync; u32 inval_mask = READ_ONCE(fi->inval_mask); u32 cache_mask = fuse_get_cache_mask(inode); /* FUSE only supports basic stats and possibly btime */ request_mask &= STATX_BASIC_STATS | STATX_BTIME; retry: if (fc->no_statx) request_mask &= 
STATX_BASIC_STATS; if (!request_mask) sync = false; else if (flags & AT_STATX_FORCE_SYNC) sync = true; else if (flags & AT_STATX_DONT_SYNC) sync = false; else if (request_mask & inval_mask & ~cache_mask) sync = true; else sync = time_before64(fi->i_time, get_jiffies_64()); if (sync) { forget_all_cached_acls(inode); /* Try statx if BTIME is requested */ if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) { err = fuse_do_statx(idmap, inode, file, stat); if (err == -ENOSYS) { fc->no_statx = 1; err = 0; goto retry; } } else { err = fuse_do_getattr(idmap, inode, stat, file); } } else if (stat) { generic_fillattr(idmap, request_mask, inode, stat); stat->mode = fi->orig_i_mode; stat->ino = fi->orig_ino; if (test_bit(FUSE_I_BTIME, &fi->state)) { stat->btime = fi->i_btime; stat->result_mask |= STATX_BTIME; } } return err; } int fuse_update_attributes(struct inode *inode, struct file *file, u32 mask) { return fuse_update_get_attr(&nop_mnt_idmap, inode, file, NULL, mask, 0); } int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, u64 child_nodeid, struct qstr *name, u32 flags) { int err = -ENOTDIR; struct inode *parent; struct dentry *dir; struct dentry *entry; parent = fuse_ilookup(fc, parent_nodeid, NULL); if (!parent) return -ENOENT; inode_lock_nested(parent, I_MUTEX_PARENT); if (!S_ISDIR(parent->i_mode)) goto unlock; err = -ENOENT; dir = d_find_alias(parent); if (!dir) goto unlock; name->hash = full_name_hash(dir, name->name, name->len); entry = d_lookup(dir, name); dput(dir); if (!entry) goto unlock; fuse_dir_changed(parent); if (!(flags & FUSE_EXPIRE_ONLY)) d_invalidate(entry); fuse_invalidate_entry_cache(entry); if (child_nodeid != 0 && d_really_is_positive(entry)) { inode_lock(d_inode(entry)); if (get_node_id(d_inode(entry)) != child_nodeid) { err = -ENOENT; goto badentry; } if (d_mountpoint(entry)) { err = -EBUSY; goto badentry; } if (d_is_dir(entry)) { shrink_dcache_parent(entry); if (!simple_empty(entry)) { err = -ENOTEMPTY; goto badentry; } d_inode(entry)->i_flags |= S_DEAD; } dont_mount(entry); clear_nlink(d_inode(entry)); err = 0; badentry: inode_unlock(d_inode(entry)); if (!err) d_delete(entry); } else { err = 0; } dput(entry); unlock: inode_unlock(parent); iput(parent); return err; } static inline bool fuse_permissible_uidgid(struct fuse_conn *fc) { const struct cred *cred = current_cred(); return (uid_eq(cred->euid, fc->user_id) && uid_eq(cred->suid, fc->user_id) && uid_eq(cred->uid, fc->user_id) && gid_eq(cred->egid, fc->group_id) && gid_eq(cred->sgid, fc->group_id) && gid_eq(cred->gid, fc->group_id)); } /* * Calling into a user-controlled filesystem gives the filesystem * daemon ptrace-like capabilities over the current process. This * means, that the filesystem daemon is able to record the exact * filesystem operations performed, and can also control the behavior * of the requester process in otherwise impossible ways. For example * it can delay the operation for arbitrary length of time allowing * DoS against the requester. * * For this reason only those processes can call into the filesystem, * for which the owner of the mount has ptrace privilege. This * excludes processes started by other users, suid or sgid processes. 
*/ bool fuse_allow_current_process(struct fuse_conn *fc) { bool allow; if (fc->allow_other) allow = current_in_userns(fc->user_ns); else allow = fuse_permissible_uidgid(fc); if (!allow && allow_sys_admin_access && capable(CAP_SYS_ADMIN)) allow = true; return allow; } static int fuse_access(struct inode *inode, int mask) { struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); struct fuse_access_in inarg; int err; BUG_ON(mask & MAY_NOT_BLOCK); /* * We should not send FUSE_ACCESS to the userspace * when idmapped mounts are enabled as for this case * we have fc->default_permissions = 1 and access * permission checks are done on the kernel side. */ WARN_ON_ONCE(!(fm->sb->s_iflags & SB_I_NOIDMAP)); if (fm->fc->no_access) return 0; memset(&inarg, 0, sizeof(inarg)); inarg.mask = mask & (MAY_READ | MAY_WRITE | MAY_EXEC); args.opcode = FUSE_ACCESS; args.nodeid = get_node_id(inode); args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; err = fuse_simple_request(fm, &args); if (err == -ENOSYS) { fm->fc->no_access = 1; err = 0; } return err; } static int fuse_perm_getattr(struct inode *inode, int mask) { if (mask & MAY_NOT_BLOCK) return -ECHILD; forget_all_cached_acls(inode); return fuse_do_getattr(&nop_mnt_idmap, inode, NULL, NULL); } /* * Check permission. The two basic access models of FUSE are: * * 1) Local access checking ('default_permissions' mount option) based * on file mode. This is the plain old disk filesystem permission * model. * * 2) "Remote" access checking, where server is responsible for * checking permission in each inode operation. An exception to this * is if ->permission() was invoked from sys_access() in which case an * access request is sent. Execute permission is still checked * locally based on file mode. */ static int fuse_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { struct fuse_conn *fc = get_fuse_conn(inode); bool refreshed = false; int err = 0; if (fuse_is_bad(inode)) return -EIO; if (!fuse_allow_current_process(fc)) return -EACCES; /* * If attributes are needed, refresh them before proceeding */ if (fc->default_permissions || ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { struct fuse_inode *fi = get_fuse_inode(inode); u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID; if (perm_mask & READ_ONCE(fi->inval_mask) || time_before64(fi->i_time, get_jiffies_64())) { refreshed = true; err = fuse_perm_getattr(inode, mask); if (err) return err; } } if (fc->default_permissions) { err = generic_permission(idmap, inode, mask); /* If permission is denied, try to refresh file attributes. This is also needed, because the root node will at first have no permissions */ if (err == -EACCES && !refreshed) { err = fuse_perm_getattr(inode, mask); if (!err) err = generic_permission(idmap, inode, mask); } /* Note: the opposite of the above test does not exist. 
So if permissions are revoked this won't be noticed immediately, only after the attribute timeout has expired */ } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { err = fuse_access(inode, mask); } else if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) { if (!(inode->i_mode & S_IXUGO)) { if (refreshed) return -EACCES; err = fuse_perm_getattr(inode, mask); if (!err && !(inode->i_mode & S_IXUGO)) return -EACCES; } } return err; } static int fuse_readlink_page(struct inode *inode, struct folio *folio) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 }; struct fuse_args_pages ap = { .num_folios = 1, .folios = &folio, .descs = &desc, }; char *link; ssize_t res; ap.args.opcode = FUSE_READLINK; ap.args.nodeid = get_node_id(inode); ap.args.out_pages = true; ap.args.out_argvar = true; ap.args.page_zeroing = true; ap.args.out_numargs = 1; ap.args.out_args[0].size = desc.length; res = fuse_simple_request(fm, &ap.args); fuse_invalidate_atime(inode); if (res < 0) return res; if (WARN_ON(res >= PAGE_SIZE)) return -EIO; link = folio_address(folio); link[res] = '\0'; return 0; } static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { struct fuse_conn *fc = get_fuse_conn(inode); struct folio *folio; int err; err = -EIO; if (fuse_is_bad(inode)) goto out_err; if (fc->cache_symlinks) return page_get_link(dentry, inode, callback); err = -ECHILD; if (!dentry) goto out_err; folio = folio_alloc(GFP_KERNEL, 0); err = -ENOMEM; if (!folio) goto out_err; err = fuse_readlink_page(inode, folio); if (err) { folio_put(folio); goto out_err; } set_delayed_call(callback, page_put_link, &folio->page); return folio_address(folio); out_err: return ERR_PTR(err); } static int fuse_dir_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); int err; if (fuse_is_bad(inode)) return -EIO; err = generic_file_open(inode, file); if (err) return err; err = fuse_do_open(fm, get_node_id(inode), file, true); if (!err) { struct fuse_file *ff = file->private_data; /* * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for * directories for backward compatibility, though it's unlikely * to be useful. 
*/ if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) nonseekable_open(inode, file); } return err; } static int fuse_dir_release(struct inode *inode, struct file *file) { fuse_release_common(file, true); return 0; } static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); int err; if (fuse_is_bad(inode)) return -EIO; if (fc->no_fsyncdir) return 0; inode_lock(inode); err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR); if (err == -ENOSYS) { fc->no_fsyncdir = 1; err = 0; } inode_unlock(inode); return err; } static long fuse_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host); /* FUSE_IOCTL_DIR only supported for API version >= 7.18 */ if (fc->minor < 18) return -ENOTTY; return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_DIR); } static long fuse_dir_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct fuse_conn *fc = get_fuse_conn(file->f_mapping->host); if (fc->minor < 18) return -ENOTTY; return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT | FUSE_IOCTL_DIR); } static bool update_mtime(unsigned ivalid, bool trust_local_mtime) { /* Always update if mtime is explicitly set */ if (ivalid & ATTR_MTIME_SET) return true; /* Or if kernel i_mtime is the official one */ if (trust_local_mtime) return true; /* If it's an open(O_TRUNC) or an ftruncate(), don't update */ if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE))) return false; /* In all other cases update */ return true; } static void iattr_to_fattr(struct mnt_idmap *idmap, struct fuse_conn *fc, struct iattr *iattr, struct fuse_setattr_in *arg, bool trust_local_cmtime) { unsigned ivalid = iattr->ia_valid; if (ivalid & ATTR_MODE) arg->valid |= FATTR_MODE, arg->mode = iattr->ia_mode; if (ivalid & ATTR_UID) { kuid_t fsuid = from_vfsuid(idmap, fc->user_ns, iattr->ia_vfsuid); arg->valid |= FATTR_UID; arg->uid = from_kuid(fc->user_ns, fsuid); } if (ivalid & ATTR_GID) { kgid_t fsgid = from_vfsgid(idmap, fc->user_ns, iattr->ia_vfsgid); arg->valid |= FATTR_GID; arg->gid = from_kgid(fc->user_ns, fsgid); } if (ivalid & ATTR_SIZE) arg->valid |= FATTR_SIZE, arg->size = iattr->ia_size; if (ivalid & ATTR_ATIME) { arg->valid |= FATTR_ATIME; arg->atime = iattr->ia_atime.tv_sec; arg->atimensec = iattr->ia_atime.tv_nsec; if (!(ivalid & ATTR_ATIME_SET)) arg->valid |= FATTR_ATIME_NOW; } if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) { arg->valid |= FATTR_MTIME; arg->mtime = iattr->ia_mtime.tv_sec; arg->mtimensec = iattr->ia_mtime.tv_nsec; if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime) arg->valid |= FATTR_MTIME_NOW; } if ((ivalid & ATTR_CTIME) && trust_local_cmtime) { arg->valid |= FATTR_CTIME; arg->ctime = iattr->ia_ctime.tv_sec; arg->ctimensec = iattr->ia_ctime.tv_nsec; } } /* * Prevent concurrent writepages on inode * * This is done by adding a negative bias to the inode write counter * and waiting for all pending writes to finish. */ void fuse_set_nowrite(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!inode_is_locked(inode)); spin_lock(&fi->lock); BUG_ON(fi->writectr < 0); fi->writectr += FUSE_NOWRITE; spin_unlock(&fi->lock); wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE); } /* * Allow writepages on inode * * Remove the bias from the writecounter and send any queued * writepages. 
*/ static void __fuse_release_nowrite(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(fi->writectr != FUSE_NOWRITE); fi->writectr = 0; fuse_flush_writepages(inode); } void fuse_release_nowrite(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); spin_lock(&fi->lock); __fuse_release_nowrite(inode); spin_unlock(&fi->lock); } static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args, struct inode *inode, struct fuse_setattr_in *inarg_p, struct fuse_attr_out *outarg_p) { args->opcode = FUSE_SETATTR; args->nodeid = get_node_id(inode); args->in_numargs = 1; args->in_args[0].size = sizeof(*inarg_p); args->in_args[0].value = inarg_p; args->out_numargs = 1; args->out_args[0].size = sizeof(*outarg_p); args->out_args[0].value = outarg_p; } /* * Flush inode->i_mtime to the server */ int fuse_flush_times(struct inode *inode, struct fuse_file *ff) { struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); struct fuse_setattr_in inarg; struct fuse_attr_out outarg; memset(&inarg, 0, sizeof(inarg)); memset(&outarg, 0, sizeof(outarg)); inarg.valid = FATTR_MTIME; inarg.mtime = inode_get_mtime_sec(inode); inarg.mtimensec = inode_get_mtime_nsec(inode); if (fm->fc->minor >= 23) { inarg.valid |= FATTR_CTIME; inarg.ctime = inode_get_ctime_sec(inode); inarg.ctimensec = inode_get_ctime_nsec(inode); } if (ff) { inarg.valid |= FATTR_FH; inarg.fh = ff->fh; } fuse_setattr_fill(fm->fc, &args, inode, &inarg, &outarg); return fuse_simple_request(fm, &args); } /* * Set attributes, and at the same time refresh them. * * Truncation is slightly complicated, because the 'truncate' request * may fail, in which case we don't want to touch the mapping. * vmtruncate() doesn't allow for this case, so do the rlimit checking * and the actual truncation by hand. */ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr, struct file *file) { struct inode *inode = d_inode(dentry); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn *fc = fm->fc; struct fuse_inode *fi = get_fuse_inode(inode); struct address_space *mapping = inode->i_mapping; FUSE_ARGS(args); struct fuse_setattr_in inarg; struct fuse_attr_out outarg; bool is_truncate = false; bool is_wb = fc->writeback_cache && S_ISREG(inode->i_mode); loff_t oldsize; int err; bool trust_local_cmtime = is_wb; bool fault_blocked = false; if (!fc->default_permissions) attr->ia_valid |= ATTR_FORCE; err = setattr_prepare(idmap, dentry, attr); if (err) return err; if (attr->ia_valid & ATTR_SIZE) { if (WARN_ON(!S_ISREG(inode->i_mode))) return -EIO; is_truncate = true; } if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; err = fuse_dax_break_layouts(inode, 0, 0); if (err) { filemap_invalidate_unlock(mapping); return err; } } if (attr->ia_valid & ATTR_OPEN) { /* This is coming from open(..., ... | O_TRUNC); */ WARN_ON(!(attr->ia_valid & ATTR_SIZE)); WARN_ON(attr->ia_size != 0); if (fc->atomic_o_trunc) { /* * No need to send request to userspace, since actual * truncation has already been done by OPEN. But still * need to truncate page cache. 
*/ i_size_write(inode, 0); truncate_pagecache(inode, 0); goto out; } file = NULL; } /* Flush dirty data/metadata before non-truncate SETATTR */ if (is_wb && attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET | ATTR_TIMES_SET)) { err = write_inode_now(inode, true); if (err) return err; fuse_set_nowrite(inode); fuse_release_nowrite(inode); } if (is_truncate) { fuse_set_nowrite(inode); set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); if (trust_local_cmtime && attr->ia_size != inode->i_size) attr->ia_valid |= ATTR_MTIME | ATTR_CTIME; } memset(&inarg, 0, sizeof(inarg)); memset(&outarg, 0, sizeof(outarg)); iattr_to_fattr(idmap, fc, attr, &inarg, trust_local_cmtime); if (file) { struct fuse_file *ff = file->private_data; inarg.valid |= FATTR_FH; inarg.fh = ff->fh; } /* Kill suid/sgid for non-directory chown unconditionally */ if (fc->handle_killpriv_v2 && !S_ISDIR(inode->i_mode) && attr->ia_valid & (ATTR_UID | ATTR_GID)) inarg.valid |= FATTR_KILL_SUIDGID; if (attr->ia_valid & ATTR_SIZE) { /* For mandatory locking in truncate */ inarg.valid |= FATTR_LOCKOWNER; inarg.lock_owner = fuse_lock_owner_id(fc, current->files); /* Kill suid/sgid for truncate only if no CAP_FSETID */ if (fc->handle_killpriv_v2 && !capable(CAP_FSETID)) inarg.valid |= FATTR_KILL_SUIDGID; } fuse_setattr_fill(fc, &args, inode, &inarg, &outarg); err = fuse_simple_request(fm, &args); if (err) { if (err == -EINTR) fuse_invalidate_attr(inode); goto error; } if (fuse_invalid_attr(&outarg.attr) || inode_wrong_type(inode, outarg.attr.mode)) { fuse_make_bad(inode); err = -EIO; goto error; } spin_lock(&fi->lock); /* the kernel maintains i_mtime locally */ if (trust_local_cmtime) { if (attr->ia_valid & ATTR_MTIME) inode_set_mtime_to_ts(inode, attr->ia_mtime); if (attr->ia_valid & ATTR_CTIME) inode_set_ctime_to_ts(inode, attr->ia_ctime); /* FIXME: clear I_DIRTY_SYNC? */ } fuse_change_attributes_common(inode, &outarg.attr, NULL, ATTR_TIMEOUT(&outarg), fuse_get_cache_mask(inode), 0); oldsize = inode->i_size; /* see the comment in fuse_change_attributes() */ if (!is_wb || is_truncate) i_size_write(inode, outarg.attr.size); if (is_truncate) { /* NOTE: this may release/reacquire fi->lock */ __fuse_release_nowrite(inode); } spin_unlock(&fi->lock); /* * Only call invalidate_inode_pages2() after removing * FUSE_NOWRITE, otherwise fuse_launder_folio() would deadlock. */ if ((is_truncate || !is_wb) && S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) { truncate_pagecache(inode, outarg.attr.size); invalidate_inode_pages2(mapping); } clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); out: if (fault_blocked) filemap_invalidate_unlock(mapping); return 0; error: if (is_truncate) fuse_release_nowrite(inode); clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); if (fault_blocked) filemap_invalidate_unlock(mapping); return err; } static int fuse_setattr(struct mnt_idmap *idmap, struct dentry *entry, struct iattr *attr) { struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); struct file *file = (attr->ia_valid & ATTR_FILE) ? attr->ia_file : NULL; int ret; if (fuse_is_bad(inode)) return -EIO; if (!fuse_allow_current_process(get_fuse_conn(inode))) return -EACCES; if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) { attr->ia_valid &= ~(ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_MODE); /* * The only sane way to reliably kill suid/sgid is to do it in * the userspace filesystem * * This should be done on write(), truncate() and chown(). 
*/ if (!fc->handle_killpriv && !fc->handle_killpriv_v2) { /* * ia_mode calculation may have used stale i_mode. * Refresh and recalculate. */ ret = fuse_do_getattr(idmap, inode, NULL, file); if (ret) return ret; attr->ia_mode = inode->i_mode; if (inode->i_mode & S_ISUID) { attr->ia_valid |= ATTR_MODE; attr->ia_mode &= ~S_ISUID; } if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { attr->ia_valid |= ATTR_MODE; attr->ia_mode &= ~S_ISGID; } } } if (!attr->ia_valid) return 0; ret = fuse_do_setattr(idmap, entry, attr, file); if (!ret) { /* * If filesystem supports acls it may have updated acl xattrs in * the filesystem, so forget cached acls for the inode. */ if (fc->posix_acl) forget_all_cached_acls(inode); /* Directory mode changed, may need to revalidate access */ if (d_is_dir(entry) && (attr->ia_valid & ATTR_MODE)) fuse_invalidate_entry_cache(entry); } return ret; } static int fuse_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct inode *inode = d_inode(path->dentry); struct fuse_conn *fc = get_fuse_conn(inode); if (fuse_is_bad(inode)) return -EIO; if (!fuse_allow_current_process(fc)) { if (!request_mask) { /* * If user explicitly requested *nothing* then don't * error out, but return st_dev only. */ stat->result_mask = 0; stat->dev = inode->i_sb->s_dev; return 0; } return -EACCES; } return fuse_update_get_attr(idmap, inode, NULL, stat, request_mask, flags); } static const struct inode_operations fuse_dir_inode_operations = { .lookup = fuse_lookup, .mkdir = fuse_mkdir, .symlink = fuse_symlink, .unlink = fuse_unlink, .rmdir = fuse_rmdir, .rename = fuse_rename2, .link = fuse_link, .setattr = fuse_setattr, .create = fuse_create, .atomic_open = fuse_atomic_open, .tmpfile = fuse_tmpfile, .mknod = fuse_mknod, .permission = fuse_permission, .getattr = fuse_getattr, .listxattr = fuse_listxattr, .get_inode_acl = fuse_get_inode_acl, .get_acl = fuse_get_acl, .set_acl = fuse_set_acl, .fileattr_get = fuse_fileattr_get, .fileattr_set = fuse_fileattr_set, }; static const struct file_operations fuse_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = fuse_readdir, .open = fuse_dir_open, .release = fuse_dir_release, .fsync = fuse_dir_fsync, .unlocked_ioctl = fuse_dir_ioctl, .compat_ioctl = fuse_dir_compat_ioctl, }; static const struct inode_operations fuse_common_inode_operations = { .setattr = fuse_setattr, .permission = fuse_permission, .getattr = fuse_getattr, .listxattr = fuse_listxattr, .get_inode_acl = fuse_get_inode_acl, .get_acl = fuse_get_acl, .set_acl = fuse_set_acl, .fileattr_get = fuse_fileattr_get, .fileattr_set = fuse_fileattr_set, }; static const struct inode_operations fuse_symlink_inode_operations = { .setattr = fuse_setattr, .get_link = fuse_get_link, .getattr = fuse_getattr, .listxattr = fuse_listxattr, }; void fuse_init_common(struct inode *inode) { inode->i_op = &fuse_common_inode_operations; } void fuse_init_dir(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); inode->i_op = &fuse_dir_inode_operations; inode->i_fop = &fuse_dir_operations; spin_lock_init(&fi->rdc.lock); fi->rdc.cached = false; fi->rdc.size = 0; fi->rdc.pos = 0; fi->rdc.version = 0; } static int fuse_symlink_read_folio(struct file *null, struct folio *folio) { int err = fuse_readlink_page(folio->mapping->host, folio); if (!err) folio_mark_uptodate(folio); folio_unlock(folio); return err; } static const struct address_space_operations fuse_symlink_aops = { .read_folio = 
	fuse_symlink_read_folio,
};

void fuse_init_symlink(struct inode *inode)
{
	inode->i_op = &fuse_symlink_inode_operations;
	inode->i_data.a_ops = &fuse_symlink_aops;
	inode_nohighmem(inode);
}
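/*
 * Illustrative only -- not part of the kernel FUSE code above.  A minimal
 * userspace sketch, assuming the uapi <linux/fuse.h> header, of how a FUSE
 * server expresses the entry/attr timeouts that fuse_change_entry_timeout()
 * stores and that fuse_dentry_revalidate()/fuse_dentry_delete() later test
 * with time_before64().  The node id, generation and the 5.5s/1.0s timeout
 * values are arbitrary example numbers; fill_entry_timeouts() is a
 * hypothetical helper, not a kernel or libfuse function.
 */
#include <linux/fuse.h>		/* struct fuse_entry_out (FUSE wire format) */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fill_entry_timeouts(struct fuse_entry_out *out,
				double entry_sec, double attr_sec)
{
	/* The wire format splits each timeout into whole seconds + nanoseconds */
	out->entry_valid = (uint64_t)entry_sec;
	out->entry_valid_nsec = (uint32_t)((entry_sec - out->entry_valid) * 1e9);
	out->attr_valid = (uint64_t)attr_sec;
	out->attr_valid_nsec = (uint32_t)((attr_sec - out->attr_valid) * 1e9);
}

int main(void)
{
	struct fuse_entry_out out;

	memset(&out, 0, sizeof(out));
	out.nodeid = 2;			/* hypothetical node id */
	out.generation = 1;
	fill_entry_timeouts(&out, 5.5, 1.0);

	/* Until entry_valid expires, d_revalidate can return 1 without
	 * issuing a new FUSE_LOOKUP request to the server. */
	printf("entry_valid=%llu.%09u attr_valid=%llu.%09u\n",
	       (unsigned long long)out.entry_valid, out.entry_valid_nsec,
	       (unsigned long long)out.attr_valid, out.attr_valid_nsec);
	return 0;
}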
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 * This is the HCI over NCI implementation, as specified in the 10.2
 * section of the NCI 1.1 specification.
 *
 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
*/ #include <linux/skbuff.h> #include "../nfc.h" #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/nfc.h> #include <linux/kcov.h> struct nci_data { u8 conn_id; u8 pipe; u8 cmd; const u8 *data; u32 data_len; } __packed; struct nci_hci_create_pipe_params { u8 src_gate; u8 dest_host; u8 dest_gate; } __packed; struct nci_hci_create_pipe_resp { u8 src_host; u8 src_gate; u8 dest_host; u8 dest_gate; u8 pipe; } __packed; struct nci_hci_delete_pipe_noti { u8 pipe; } __packed; struct nci_hci_all_pipe_cleared_noti { u8 host; } __packed; struct nci_hcp_message { u8 header; /* type -cmd,evt,rsp- + instruction */ u8 data[]; } __packed; struct nci_hcp_packet { u8 header; /* cbit+pipe */ struct nci_hcp_message message; } __packed; #define NCI_HCI_ANY_SET_PARAMETER 0x01 #define NCI_HCI_ANY_GET_PARAMETER 0x02 #define NCI_HCI_ANY_CLOSE_PIPE 0x04 #define NCI_HCI_ADM_CLEAR_ALL_PIPE 0x14 #define NCI_HFP_NO_CHAINING 0x80 #define NCI_NFCEE_ID_HCI 0x80 #define NCI_EVT_HOT_PLUG 0x03 #define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01 #define NCI_HCI_ADM_CREATE_PIPE 0x10 #define NCI_HCI_ADM_DELETE_PIPE 0x11 /* HCP headers */ #define NCI_HCI_HCP_PACKET_HEADER_LEN 1 #define NCI_HCI_HCP_MESSAGE_HEADER_LEN 1 #define NCI_HCI_HCP_HEADER_LEN 2 /* HCP types */ #define NCI_HCI_HCP_COMMAND 0x00 #define NCI_HCI_HCP_EVENT 0x01 #define NCI_HCI_HCP_RESPONSE 0x02 #define NCI_HCI_ADM_NOTIFY_PIPE_CREATED 0x12 #define NCI_HCI_ADM_NOTIFY_PIPE_DELETED 0x13 #define NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15 #define NCI_HCI_FRAGMENT 0x7f #define NCI_HCP_HEADER(type, instr) ((((type) & 0x03) << 6) |\ ((instr) & 0x3f)) #define NCI_HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6) #define NCI_HCP_MSG_GET_CMD(header) (header & 0x3f) #define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f) static int nci_hci_result_to_errno(u8 result) { switch (result) { case NCI_HCI_ANY_OK: return 0; case NCI_HCI_ANY_E_REG_PAR_UNKNOWN: return -EOPNOTSUPP; case NCI_HCI_ANY_E_TIMEOUT: return -ETIME; default: return -1; } } /* HCI core */ static void nci_hci_reset_pipes(struct nci_hci_dev *hdev) { int i; for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { hdev->pipes[i].gate = NCI_HCI_INVALID_GATE; hdev->pipes[i].host = NCI_HCI_INVALID_HOST; } memset(hdev->gate2pipe, NCI_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe)); } static void nci_hci_reset_pipes_per_host(struct nci_dev *ndev, u8 host) { int i; for (i = 0; i < NCI_HCI_MAX_PIPES; i++) { if (ndev->hci_dev->pipes[i].host == host) { ndev->hci_dev->pipes[i].gate = NCI_HCI_INVALID_GATE; ndev->hci_dev->pipes[i].host = NCI_HCI_INVALID_HOST; } } } /* Fragment HCI data over NCI packet. * NFC Forum NCI 10.2.2 Data Exchange: * The payload of the Data Packets sent on the Logical Connection SHALL be * valid HCP packets, as defined within [ETSI_102622]. Each Data Packet SHALL * contain a single HCP packet. NCI Segmentation and Reassembly SHALL NOT be * applied to Data Messages in either direction. The HCI fragmentation mechanism * is used if required. 
*/ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe, const u8 data_type, const u8 *data, size_t data_len) { const struct nci_conn_info *conn_info; struct sk_buff *skb; int len, i, r; u8 cb = pipe; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; i = 0; skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, NCI_DATA_HDR_SIZE + 2); *(u8 *)skb_push(skb, 1) = data_type; do { /* If last packet add NCI_HFP_NO_CHAINING */ if (i + conn_info->max_pkt_payload_len - (skb->len + 1) >= data_len) { cb |= NCI_HFP_NO_CHAINING; len = data_len - i; } else { len = conn_info->max_pkt_payload_len - skb->len - 1; } *(u8 *)skb_push(skb, 1) = cb; if (len > 0) skb_put_data(skb, data + i, len); r = nci_send_data(ndev, conn_info->conn_id, skb); if (r < 0) return r; i += len; if (i < data_len) { skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len + NCI_DATA_HDR_SIZE, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, NCI_DATA_HDR_SIZE + 1); } } while (i < data_len); return i; } static void nci_hci_send_data_req(struct nci_dev *ndev, const void *opt) { const struct nci_data *data = opt; nci_hci_send_data(ndev, data->pipe, data->cmd, data->data, data->data_len); } int nci_hci_send_event(struct nci_dev *ndev, u8 gate, u8 event, const u8 *param, size_t param_len) { u8 pipe = ndev->hci_dev->gate2pipe[gate]; if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; return nci_hci_send_data(ndev, pipe, NCI_HCP_HEADER(NCI_HCI_HCP_EVENT, event), param, param_len); } EXPORT_SYMBOL(nci_hci_send_event); int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd, const u8 *param, size_t param_len, struct sk_buff **skb) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 pipe = ndev->hci_dev->gate2pipe[gate]; if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, cmd); data.data = param; data.data_len = param_len; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); if (!r && skb) *skb = conn_info->rx_skb; } return r; } EXPORT_SYMBOL(nci_hci_send_cmd); int nci_hci_clear_all_pipes(struct nci_dev *ndev) { int r; r = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_CLEAR_ALL_PIPE, NULL, 0, NULL); if (r < 0) return r; nci_hci_reset_pipes(ndev->hci_dev); return r; } EXPORT_SYMBOL(nci_hci_clear_all_pipes); static void nci_hci_event_received(struct nci_dev *ndev, u8 pipe, u8 event, struct sk_buff *skb) { if (ndev->ops->hci_event_received) ndev->ops->hci_event_received(ndev, pipe, event, skb); } static void nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb) { u8 gate = ndev->hci_dev->pipes[pipe].gate; u8 status = NCI_HCI_ANY_OK | ~NCI_HCI_FRAGMENT; u8 dest_gate, new_pipe; struct nci_hci_create_pipe_resp *create_info; struct nci_hci_delete_pipe_noti *delete_info; struct nci_hci_all_pipe_cleared_noti *cleared_info; pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd); switch (cmd) { case NCI_HCI_ADM_NOTIFY_PIPE_CREATED: if (skb->len != 5) { status = NCI_HCI_ANY_E_NOK; goto exit; } create_info = (struct 
nci_hci_create_pipe_resp *)skb->data; dest_gate = create_info->dest_gate; new_pipe = create_info->pipe; if (new_pipe >= NCI_HCI_MAX_PIPES) { status = NCI_HCI_ANY_E_NOK; goto exit; } /* Save the new created pipe and bind with local gate, * the description for skb->data[3] is destination gate id * but since we received this cmd from host controller, we * are the destination and it is our local gate */ ndev->hci_dev->gate2pipe[dest_gate] = new_pipe; ndev->hci_dev->pipes[new_pipe].gate = dest_gate; ndev->hci_dev->pipes[new_pipe].host = create_info->src_host; break; case NCI_HCI_ANY_OPEN_PIPE: /* If the pipe is not created report an error */ if (gate == NCI_HCI_INVALID_GATE) { status = NCI_HCI_ANY_E_NOK; goto exit; } break; case NCI_HCI_ADM_NOTIFY_PIPE_DELETED: if (skb->len != 1) { status = NCI_HCI_ANY_E_NOK; goto exit; } delete_info = (struct nci_hci_delete_pipe_noti *)skb->data; if (delete_info->pipe >= NCI_HCI_MAX_PIPES) { status = NCI_HCI_ANY_E_NOK; goto exit; } ndev->hci_dev->pipes[delete_info->pipe].gate = NCI_HCI_INVALID_GATE; ndev->hci_dev->pipes[delete_info->pipe].host = NCI_HCI_INVALID_HOST; break; case NCI_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED: if (skb->len != 1) { status = NCI_HCI_ANY_E_NOK; goto exit; } cleared_info = (struct nci_hci_all_pipe_cleared_noti *)skb->data; nci_hci_reset_pipes_per_host(ndev, cleared_info->host); break; default: pr_debug("Discarded unknown cmd %x to gate %x\n", cmd, gate); break; } if (ndev->ops->hci_cmd_received) ndev->ops->hci_cmd_received(ndev, pipe, cmd, skb); exit: nci_hci_send_data(ndev, pipe, status, NULL, 0); kfree_skb(skb); } static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe, struct sk_buff *skb) { struct nci_conn_info *conn_info; conn_info = ndev->hci_dev->conn_info; if (!conn_info) goto exit; conn_info->rx_skb = skb; exit: nci_req_complete(ndev, NCI_STATUS_OK); } /* Receive hcp message for pipe, with type and cmd. * skb contains optional message data only. 
*/ static void nci_hci_hcp_message_rx(struct nci_dev *ndev, u8 pipe, u8 type, u8 instruction, struct sk_buff *skb) { switch (type) { case NCI_HCI_HCP_RESPONSE: nci_hci_resp_received(ndev, pipe, skb); break; case NCI_HCI_HCP_COMMAND: nci_hci_cmd_received(ndev, pipe, instruction, skb); break; case NCI_HCI_HCP_EVENT: nci_hci_event_received(ndev, pipe, instruction, skb); break; default: pr_err("UNKNOWN MSG Type %d, instruction=%d\n", type, instruction); kfree_skb(skb); break; } nci_req_complete(ndev, NCI_STATUS_OK); } static void nci_hci_msg_rx_work(struct work_struct *work) { struct nci_hci_dev *hdev = container_of(work, struct nci_hci_dev, msg_rx_work); struct sk_buff *skb; const struct nci_hcp_message *message; u8 pipe, type, instruction; for (; (skb = skb_dequeue(&hdev->msg_rx_queue)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]); skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN); message = (struct nci_hcp_message *)skb->data; type = NCI_HCP_MSG_GET_TYPE(message->header); instruction = NCI_HCP_MSG_GET_CMD(message->header); skb_pull(skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); nci_hci_hcp_message_rx(hdev->ndev, pipe, type, instruction, skb); } } void nci_hci_data_received_cb(void *context, struct sk_buff *skb, int err) { struct nci_dev *ndev = (struct nci_dev *)context; struct nci_hcp_packet *packet; u8 pipe, type; struct sk_buff *hcp_skb; struct sk_buff *frag_skb; int msg_len; if (err) { nci_req_complete(ndev, err); return; } packet = (struct nci_hcp_packet *)skb->data; if ((packet->header & ~NCI_HCI_FRAGMENT) == 0) { skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); return; } /* it's the last fragment. Does it need re-aggregation? */ if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) { pipe = NCI_HCP_MSG_GET_PIPE(packet->header); skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb); msg_len = 0; skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { msg_len += (frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN); } hcp_skb = nfc_alloc_recv_skb(NCI_HCI_HCP_PACKET_HEADER_LEN + msg_len, GFP_KERNEL); if (!hcp_skb) { nci_req_complete(ndev, -ENOMEM); return; } skb_put_u8(hcp_skb, pipe); skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) { msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN; skb_put_data(hcp_skb, frag_skb->data + NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len); } skb_queue_purge(&ndev->hci_dev->rx_hcp_frags); } else { packet->header &= NCI_HCI_FRAGMENT; hcp_skb = skb; } /* if this is a response, dispatch immediately to * unblock waiting cmd context. Otherwise, enqueue to dispatch * in separate context where handler can also execute command. 
*/ packet = (struct nci_hcp_packet *)hcp_skb->data; type = NCI_HCP_MSG_GET_TYPE(packet->message.header); if (type == NCI_HCI_HCP_RESPONSE) { pipe = NCI_HCP_MSG_GET_PIPE(packet->header); skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN); nci_hci_hcp_message_rx(ndev, pipe, type, NCI_STATUS_OK, hcp_skb); } else { skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb); schedule_work(&ndev->hci_dev->msg_rx_work); } } int nci_hci_open_pipe(struct nci_dev *ndev, u8 pipe) { struct nci_data data; const struct nci_conn_info *conn_info; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_OPEN_PIPE); data.data = NULL; data.data_len = 0; return nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); } EXPORT_SYMBOL(nci_hci_open_pipe); static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host, u8 dest_gate, int *result) { u8 pipe; struct sk_buff *skb; struct nci_hci_create_pipe_params params; const struct nci_hci_create_pipe_resp *resp; pr_debug("gate=%d\n", dest_gate); params.src_gate = NCI_HCI_ADMIN_GATE; params.dest_host = dest_host; params.dest_gate = dest_gate; *result = nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_CREATE_PIPE, (u8 *)&params, sizeof(params), &skb); if (*result < 0) return NCI_HCI_INVALID_PIPE; resp = (struct nci_hci_create_pipe_resp *)skb->data; pipe = resp->pipe; kfree_skb(skb); pr_debug("pipe created=%d\n", pipe); return pipe; } static int nci_hci_delete_pipe(struct nci_dev *ndev, u8 pipe) { return nci_hci_send_cmd(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL); } int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx, const u8 *param, size_t param_len) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 *tmp; u8 pipe = ndev->hci_dev->gate2pipe[gate]; pr_debug("idx=%d to gate %d\n", idx, gate); if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; tmp = kmalloc(1 + param_len, GFP_KERNEL); if (!tmp) return -ENOMEM; *tmp = idx; memcpy(tmp + 1, param, param_len); data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_SET_PARAMETER); data.data = tmp; data.data_len = param_len + 1; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); } kfree(tmp); return r; } EXPORT_SYMBOL(nci_hci_set_param); int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx, struct sk_buff **skb) { const struct nci_hcp_message *message; const struct nci_conn_info *conn_info; struct nci_data data; int r; u8 pipe = ndev->hci_dev->gate2pipe[gate]; pr_debug("idx=%d to gate %d\n", idx, gate); if (pipe == NCI_HCI_INVALID_PIPE) return -EADDRNOTAVAIL; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; data.conn_id = conn_info->conn_id; data.pipe = pipe; data.cmd = NCI_HCP_HEADER(NCI_HCI_HCP_COMMAND, NCI_HCI_ANY_GET_PARAMETER); data.data = &idx; data.data_len = 1; r = nci_request(ndev, nci_hci_send_data_req, &data, msecs_to_jiffies(NCI_DATA_TIMEOUT)); if (r == NCI_STATUS_OK) { message = (struct nci_hcp_message *)conn_info->rx_skb->data; r = nci_hci_result_to_errno( 
NCI_HCP_MSG_GET_CMD(message->header)); skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN); if (!r && skb) *skb = conn_info->rx_skb; } return r; } EXPORT_SYMBOL(nci_hci_get_param); int nci_hci_connect_gate(struct nci_dev *ndev, u8 dest_host, u8 dest_gate, u8 pipe) { bool pipe_created = false; int r; if (pipe == NCI_HCI_DO_NOT_OPEN_PIPE) return 0; if (ndev->hci_dev->gate2pipe[dest_gate] != NCI_HCI_INVALID_PIPE) return -EADDRINUSE; if (pipe != NCI_HCI_INVALID_PIPE) goto open_pipe; switch (dest_gate) { case NCI_HCI_LINK_MGMT_GATE: pipe = NCI_HCI_LINK_MGMT_PIPE; break; case NCI_HCI_ADMIN_GATE: pipe = NCI_HCI_ADMIN_PIPE; break; default: pipe = nci_hci_create_pipe(ndev, dest_host, dest_gate, &r); if (pipe == NCI_HCI_INVALID_PIPE) return r; pipe_created = true; break; } open_pipe: r = nci_hci_open_pipe(ndev, pipe); if (r < 0) { if (pipe_created) { if (nci_hci_delete_pipe(ndev, pipe) < 0) { /* TODO: Cannot clean by deleting pipe... * -> inconsistent state */ } } return r; } ndev->hci_dev->pipes[pipe].gate = dest_gate; ndev->hci_dev->pipes[pipe].host = dest_host; ndev->hci_dev->gate2pipe[dest_gate] = pipe; return 0; } EXPORT_SYMBOL(nci_hci_connect_gate); static int nci_hci_dev_connect_gates(struct nci_dev *ndev, u8 gate_count, const struct nci_hci_gate *gates) { int r; while (gate_count--) { r = nci_hci_connect_gate(ndev, gates->dest_host, gates->gate, gates->pipe); if (r < 0) return r; gates++; } return 0; } int nci_hci_dev_session_init(struct nci_dev *ndev) { struct nci_conn_info *conn_info; struct sk_buff *skb; int r; ndev->hci_dev->count_pipes = 0; ndev->hci_dev->expected_pipes = 0; conn_info = ndev->hci_dev->conn_info; if (!conn_info) return -EPROTO; conn_info->data_exchange_cb = nci_hci_data_received_cb; conn_info->data_exchange_cb_context = ndev; nci_hci_reset_pipes(ndev->hci_dev); if (ndev->hci_dev->init_data.gates[0].gate != NCI_HCI_ADMIN_GATE) return -EPROTO; r = nci_hci_connect_gate(ndev, ndev->hci_dev->init_data.gates[0].dest_host, ndev->hci_dev->init_data.gates[0].gate, ndev->hci_dev->init_data.gates[0].pipe); if (r < 0) return r; r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, &skb); if (r < 0) return r; if (skb->len && skb->len == strlen(ndev->hci_dev->init_data.session_id) && !memcmp(ndev->hci_dev->init_data.session_id, skb->data, skb->len) && ndev->ops->hci_load_session) { /* Restore gate<->pipe table from some proprietary location. */ r = ndev->ops->hci_load_session(ndev); } else { r = nci_hci_clear_all_pipes(ndev); if (r < 0) goto exit; r = nci_hci_dev_connect_gates(ndev, ndev->hci_dev->init_data.gate_count, ndev->hci_dev->init_data.gates); if (r < 0) goto exit; r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY, ndev->hci_dev->init_data.session_id, strlen(ndev->hci_dev->init_data.session_id)); } exit: kfree_skb(skb); return r; } EXPORT_SYMBOL(nci_hci_dev_session_init); struct nci_hci_dev *nci_hci_allocate(struct nci_dev *ndev) { struct nci_hci_dev *hdev; hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); if (!hdev) return NULL; skb_queue_head_init(&hdev->rx_hcp_frags); INIT_WORK(&hdev->msg_rx_work, nci_hci_msg_rx_work); skb_queue_head_init(&hdev->msg_rx_queue); hdev->ndev = ndev; return hdev; } void nci_hci_deallocate(struct nci_dev *ndev) { kfree(ndev->hci_dev); }
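For readers working from the flattened listing above: the re-aggregation path in nci_hci_data_received_cb() queues intermediate HCP fragments, and when the final packet arrives it sums every fragment's payload (each fragment begins with the one-byte packet header carrying the pipe id), allocates a single buffer, writes the pipe id back as the first byte, and copies the payloads behind it in order. The following is a minimal userspace sketch of that step only; hcp_frag, hcp_reassemble and HCP_PACKET_HEADER_LEN are illustrative names, and none of the kernel's skb or queue machinery is involved.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HCP_PACKET_HEADER_LEN 1        /* the single pipe byte, as the code above implies */

struct hcp_frag {
        const uint8_t *data;
        size_t len;                    /* includes the 1-byte packet header */
};

/* Returns a malloc'd buffer laid out as [pipe byte][payload 0][payload 1]... */
static uint8_t *hcp_reassemble(const struct hcp_frag *frags, size_t nfrags,
                               uint8_t pipe, size_t *out_len)
{
        size_t msg_len = 0, off = HCP_PACKET_HEADER_LEN;
        uint8_t *buf;
        size_t i;

        for (i = 0; i < nfrags; i++)
                msg_len += frags[i].len - HCP_PACKET_HEADER_LEN;

        buf = malloc(HCP_PACKET_HEADER_LEN + msg_len);
        if (!buf)
                return NULL;

        buf[0] = pipe;                 /* mirrors skb_put_u8(hcp_skb, pipe) */
        for (i = 0; i < nfrags; i++) {
                size_t plen = frags[i].len - HCP_PACKET_HEADER_LEN;

                memcpy(buf + off, frags[i].data + HCP_PACKET_HEADER_LEN, plen);
                off += plen;
        }

        *out_len = HCP_PACKET_HEADER_LEN + msg_len;
        return buf;
}

int main(void)
{
        /* Illustrative fragments: one header byte followed by payload bytes. */
        const uint8_t f1[] = { 0x81, 'h', 'e' };
        const uint8_t f2[] = { 0x81, 'l', 'l', 'o' };
        const struct hcp_frag frags[] = { { f1, sizeof(f1) }, { f2, sizeof(f2) } };
        size_t len;
        uint8_t *msg = hcp_reassemble(frags, 2, 0x01, &len);

        if (msg) {
                printf("reassembled %zu bytes, pipe 0x%02x\n", len, msg[0]);
                free(msg);
        }
        return 0;
}

Per the code's own comment, a bit in the packet header distinguishes intermediate fragments (which are only queued) from the last packet, which is what triggers this re-aggregation before the message is dispatched.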
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SCTP kernel implementation
 * (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
 *
 * This file is part of the SCTP kernel implementation
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Vlad Yasevich <vladislav.yasevich@hp.com>
 */

#ifndef __sctp_auth_h__
#define __sctp_auth_h__

#include <linux/list.h>
#include <linux/refcount.h>

struct sctp_endpoint;
struct sctp_association;
struct sctp_authkey;
struct sctp_hmacalgo;
struct crypto_shash;

/*
 * Define a generic struct that will hold all the info
 * necessary for an HMAC transform
 */
struct sctp_hmac {
        __u16 hmac_id;          /* one of the above ids */
        char *hmac_name;        /* name for loading */
        __u16 hmac_len;         /* length of the signature */
};

/* This is a generic structure that contains authentication bytes used
 * as keying material.  It is what is referred to as a byte-vector all
 * over SCTP-AUTH.
 */
struct sctp_auth_bytes {
        refcount_t refcnt;
        __u32 len;
        __u8 data[];
};

/* Definition for a shared key, whether endpoint or association */
struct sctp_shared_key {
        struct list_head key_list;
        struct sctp_auth_bytes *key;
        refcount_t refcnt;
        __u16 key_id;
        __u8 deactivated;
};

#define key_for_each(__key, __list_head) \
        list_for_each_entry(__key, __list_head, key_list)

#define key_for_each_safe(__key, __tmp, __list_head) \
        list_for_each_entry_safe(__key, __tmp, __list_head, key_list)

static inline void sctp_auth_key_hold(struct sctp_auth_bytes *key)
{
        if (!key)
                return;

        refcount_inc(&key->refcnt);
}

void sctp_auth_key_put(struct sctp_auth_bytes *key);
struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
void sctp_auth_destroy_keys(struct list_head *keys);
int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
struct sctp_shared_key *sctp_auth_get_shkey(const struct sctp_association *asoc,
                                            __u16 key_id);
int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
                               struct sctp_association *asoc, gfp_t gfp);
int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
                                     struct sctp_hmac_algo_param *hmacs);
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
                                  __be16 hmac_id);
int sctp_auth_send_cid(enum sctp_cid chunk,
                       const struct sctp_association *asoc);
int sctp_auth_recv_cid(enum sctp_cid chunk,
                       const struct sctp_association *asoc);
void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
                              struct sk_buff *skb, struct sctp_auth_chunk *auth,
                              struct sctp_shared_key *ep_key, gfp_t gfp);
void sctp_auth_shkey_release(struct sctp_shared_key *sh_key);
void sctp_auth_shkey_hold(struct sctp_shared_key *sh_key);

/* API Helpers */
int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id);
int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
                           struct sctp_hmacalgo *hmacs);
int sctp_auth_set_key(struct sctp_endpoint *ep, struct sctp_association *asoc,
                      struct sctp_authkey *auth_key);
int sctp_auth_set_active_key(struct sctp_endpoint *ep,
                             struct sctp_association *asoc, __u16 key_id);
int sctp_auth_del_key_id(struct sctp_endpoint *ep,
                         struct sctp_association *asoc, __u16 key_id);
int sctp_auth_deact_key_id(struct sctp_endpoint *ep,
                           struct sctp_association *asoc, __u16 key_id);
int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
void sctp_auth_free(struct sctp_endpoint *ep);

#endif
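The sctp_auth_bytes structure above pairs a refcount with a flexible-array byte vector, and sctp_auth_key_hold() together with the declared sctp_auth_key_put() manage its lifetime. As a rough illustration of that hold/put pattern outside the kernel, here is a hypothetical C11 sketch: auth_bytes, auth_bytes_new, auth_bytes_hold and auth_bytes_put are made-up names, atomic_uint stands in for refcount_t, and the wipe-before-free is this sketch's own defensive choice rather than something the header specifies.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct auth_bytes {
        atomic_uint refcnt;
        uint32_t len;
        uint8_t data[];                /* flexible array, as in sctp_auth_bytes */
};

static struct auth_bytes *auth_bytes_new(const uint8_t *key, uint32_t len)
{
        struct auth_bytes *b = malloc(sizeof(*b) + len);

        if (!b)
                return NULL;
        atomic_init(&b->refcnt, 1);
        b->len = len;
        memcpy(b->data, key, len);
        return b;
}

static void auth_bytes_hold(struct auth_bytes *b)
{
        if (b)
                atomic_fetch_add(&b->refcnt, 1);
}

static void auth_bytes_put(struct auth_bytes *b)
{
        if (b && atomic_fetch_sub(&b->refcnt, 1) == 1) {
                /* Last reference: wipe the keying material before freeing. */
                memset(b->data, 0, b->len);
                free(b);
        }
}

int main(void)
{
        static const uint8_t secret[] = { 1, 2, 3, 4 };
        struct auth_bytes *key = auth_bytes_new(secret, sizeof(secret));

        auth_bytes_hold(key);          /* e.g. a second user of the same key */
        auth_bytes_put(key);
        auth_bytes_put(key);           /* last put wipes and frees */
        return 0;
}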
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/mempool.h> #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include <linux/fs.h> #include <linux/list_sort.h> #include <linux/blkdev.h> #include "bmap.h" #include "dir.h" #include "gfs2.h" #include "incore.h" #include "inode.h" #include "glock.h" #include "glops.h" #include "log.h" #include "lops.h" #include "meta_io.h" #include "recovery.h" #include "rgrp.h" #include "trans.h" #include "util.h" #include "trace_gfs2.h" /** * gfs2_pin - Pin a buffer in memory * @sdp: The superblock * @bh: The buffer to be pinned * * The log lock must be held when calling this function */ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; BUG_ON(!current->journal_info); clear_buffer_dirty(bh); if (test_set_buffer_pinned(bh)) gfs2_assert_withdraw(sdp, 0); if (!buffer_uptodate(bh)) gfs2_io_error_bh_wd(sdp, bh); bd = bh->b_private; /* If this buffer is in the AIL and it has already been written * to in-place disk block, remove it from the AIL.
*/ spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list); spin_unlock(&sdp->sd_ail_lock); get_bh(bh); atomic_inc(&sdp->sd_log_pinned); trace_gfs2_pin(bd, 1); } static bool buffer_is_rgrp(const struct gfs2_bufdata *bd) { return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP; } static void maybe_release_space(struct gfs2_bufdata *bd) { struct gfs2_glock *gl = bd->bd_gl; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; struct gfs2_bitmap *bi = rgd->rd_bits + index; rgrp_lock_local(rgd); if (bi->bi_clone == NULL) goto out; if (sdp->sd_args.ar_discard) gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL); memcpy(bi->bi_clone + bi->bi_offset, bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes); clear_bit(GBF_FULL, &bi->bi_flags); rgd->rd_free_clone = rgd->rd_free; BUG_ON(rgd->rd_free_clone < rgd->rd_reserved); rgd->rd_extfail_pt = rgd->rd_free; out: rgrp_unlock_local(rgd); } /** * gfs2_unpin - Unpin a buffer * @sdp: the filesystem the buffer belongs to * @bh: The buffer to unpin * @tr: The system transaction being flushed */ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, struct gfs2_trans *tr) { struct gfs2_bufdata *bd = bh->b_private; BUG_ON(!buffer_uptodate(bh)); BUG_ON(!buffer_pinned(bh)); lock_buffer(bh); mark_buffer_dirty(bh); clear_buffer_pinned(bh); if (buffer_is_rgrp(bd)) maybe_release_space(bd); spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) { list_del(&bd->bd_ail_st_list); brelse(bh); } else { struct gfs2_glock *gl = bd->bd_gl; list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); atomic_inc(&gl->gl_ail_count); } bd->bd_tr = tr; list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list); spin_unlock(&sdp->sd_ail_lock); clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); trace_gfs2_pin(bd, 0); unlock_buffer(bh); atomic_dec(&sdp->sd_log_pinned); } void gfs2_log_incr_head(struct gfs2_sbd *sdp) { BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && (sdp->sd_log_flush_head != sdp->sd_log_head)); if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) sdp->sd_log_flush_head = 0; } u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock) { struct gfs2_journal_extent *je; list_for_each_entry(je, &jd->extent_list, list) { if (lblock >= je->lblock && lblock < je->lblock + je->blocks) return je->dblock + lblock - je->lblock; } return -1; } /** * gfs2_end_log_write_bh - end log write of pagecache data with buffers * @sdp: The superblock * @bvec: The bio_vec * @error: The i/o status * * This finds the relevant buffers and unlocks them and sets the * error flag according to the status of the i/o request. This is * used when the log is writing data which has an in-place version * that is pinned in the pagecache. */ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, blk_status_t error) { struct buffer_head *bh, *next; struct page *page = bvec->bv_page; unsigned size; bh = page_buffers(page); size = bvec->bv_len; while (bh_offset(bh) < bvec->bv_offset) bh = bh->b_this_page; do { if (error) mark_buffer_write_io_error(bh); unlock_buffer(bh); next = bh->b_this_page; size -= bh->b_size; brelse(bh); bh = next; } while(bh && size); } /** * gfs2_end_log_write - end of i/o to the log * @bio: The bio * * Each bio_vec contains either data from the pagecache or data * relating to the log itself. Here we iterate over the bio_vec * array, processing both kinds of data. 
* */ static void gfs2_end_log_write(struct bio *bio) { struct gfs2_sbd *sdp = bio->bi_private; struct bio_vec *bvec; struct page *page; struct bvec_iter_all iter_all; if (bio->bi_status) { if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status)) fs_err(sdp, "Error %d writing to journal, jid=%u\n", bio->bi_status, sdp->sd_jdesc->jd_jid); gfs2_withdraw_delayed(sdp); /* prevent more writes to the journal */ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); wake_up(&sdp->sd_logd_waitq); } bio_for_each_segment_all(bvec, bio, iter_all) { page = bvec->bv_page; if (page_has_buffers(page)) gfs2_end_log_write_bh(sdp, bvec, bio->bi_status); else mempool_free(page, gfs2_page_pool); } bio_put(bio); if (atomic_dec_and_test(&sdp->sd_log_in_flight)) wake_up(&sdp->sd_log_flush_wait); } /** * gfs2_log_submit_bio - Submit any pending log bio * @biop: Address of the bio pointer * @opf: REQ_OP | op_flags * * Submit any pending part-built or full bio to the block device. If * there is no pending bio, then this is a no-op. */ void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf) { struct bio *bio = *biop; if (bio) { struct gfs2_sbd *sdp = bio->bi_private; atomic_inc(&sdp->sd_log_in_flight); bio->bi_opf = opf; submit_bio(bio); *biop = NULL; } } /** * gfs2_log_alloc_bio - Allocate a bio * @sdp: The super block * @blkno: The device block number we want to write to * @end_io: The bi_end_io callback * * Allocate a new bio, initialize it with the given parameters and return it. * * Returns: The newly allocated bio */ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno, bio_end_io_t *end_io) { struct super_block *sb = sdp->sd_vfs; struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO); bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift; bio->bi_end_io = end_io; bio->bi_private = sdp; return bio; } /** * gfs2_log_get_bio - Get cached log bio, or allocate a new one * @sdp: The super block * @blkno: The device block number we want to write to * @biop: The bio to get or allocate * @op: REQ_OP * @end_io: The bi_end_io callback * @flush: Always flush the current bio and allocate a new one? * * If there is a cached bio, then if the next block number is sequential * with the previous one, return it, otherwise flush the bio to the * device. If there is no cached bio, or we just flushed it, then * allocate a new one. * * Returns: The bio to use for log writes */ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, struct bio **biop, enum req_op op, bio_end_io_t *end_io, bool flush) { struct bio *bio = *biop; if (bio) { u64 nblk; nblk = bio_end_sector(bio); nblk >>= sdp->sd_fsb2bb_shift; if (blkno == nblk && !flush) return bio; gfs2_log_submit_bio(biop, op); } *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); return *biop; } /** * gfs2_log_write - write to log * @sdp: the filesystem * @jd: The journal descriptor * @page: the page to write * @size: the size of the data to write * @offset: the offset within the page * @blkno: block number of the log entry * * Try and add the page segment to the current bio. If that fails, * submit the current bio to the device and create a new one, and * then add the page segment to that. 
*/ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, struct page *page, unsigned size, unsigned offset, u64 blkno) { struct bio *bio; int ret; bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE, gfs2_end_log_write, false); ret = bio_add_page(bio, page, size, offset); if (ret == 0) { bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE, gfs2_end_log_write, true); ret = bio_add_page(bio, page, size, offset); WARN_ON(ret == 0); } } /** * gfs2_log_write_bh - write a buffer's content to the log * @sdp: The super block * @bh: The buffer pointing to the in-place location * * This writes the content of the buffer to the next available location * in the log. The buffer will be unlocked once the i/o to the log has * completed. */ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) { u64 dblock; dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size, bh_offset(bh), dblock); } /** * gfs2_log_write_page - write one block stored in a page, into the log * @sdp: The superblock * @page: The struct page * * This writes the first block-sized part of the page into the log. Note * that the page must have been allocated from the gfs2_page_pool mempool * and that after this has been called, ownership has been transferred and * the page may be freed at any time. */ static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) { struct super_block *sb = sdp->sd_vfs; u64 dblock; dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock); } /** * gfs2_end_log_read - end I/O callback for reads from the log * @bio: The bio * * Simply unlock the pages in the bio. The main thread will wait on them and * process them in order as necessary. */ static void gfs2_end_log_read(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; bio_for_each_folio_all(fi, bio) { /* We're abusing wb_err to get the error to gfs2_find_jhead */ filemap_set_wb_err(fi.folio->mapping, error); folio_end_read(fi.folio, !error); } bio_put(bio); } /** * gfs2_jhead_pg_srch - Look for the journal head in a given page. * @jd: The journal descriptor * @head: The journal head to start from * @page: The page to look in * * Returns: 1 if found, 0 otherwise. */ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, struct page *page) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_log_header_host lh; void *kaddr; unsigned int offset; bool ret = false; kaddr = kmap_local_page(page); for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { if (lh.lh_sequence >= head->lh_sequence) *head = lh; else { ret = true; break; } } } kunmap_local(kaddr); return ret; } /** * gfs2_jhead_process_page - Search/cleanup a page * @jd: The journal descriptor * @index: Index of the page to look into * @head: The journal head to start from * @done: If set, perform only cleanup, else search and set if found. * * Find the folio with 'index' in the journal's mapping. Search the folio for * the journal head if requested (cleanup == false). Release refs on the * folio so the page cache can reclaim it. 
We grabbed a * reference on this folio twice, first when we did a grab_cache_page() * to obtain the folio to add it to the bio and second when we do a * filemap_get_folio() here to get the folio to wait on while I/O on it is being * completed. * This function is also used to free up a folio we might've grabbed but not * used. Maybe we added it to a bio, but not submitted it for I/O. Or we * submitted the I/O, but we already found the jhead so we only need to drop * our references to the folio. */ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, struct gfs2_log_header_host *head, bool *done) { struct folio *folio; folio = filemap_get_folio(jd->jd_inode->i_mapping, index); folio_wait_locked(folio); if (!folio_test_uptodate(folio)) *done = true; if (!*done) *done = gfs2_jhead_pg_srch(jd, head, &folio->page); /* filemap_get_folio() and the earlier grab_cache_page() */ folio_put_refs(folio, 2); } static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) { struct bio *new; new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); bio_chain(new, prev); submit_bio(prev); return new; } /** * gfs2_find_jhead - find the head of a log * @jd: The journal descriptor * @head: The log descriptor for the head of the log is returned here * @keep_cache: If set inode pages will not be truncated * * Do a search of a journal by reading it in large chunks using bios and find * the valid log entry with the highest sequence number. (i.e. the log head) * * Returns: 0 on success, errno otherwise */ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, bool keep_cache) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct address_space *mapping = jd->jd_inode->i_mapping; unsigned int block = 0, blocks_submitted = 0, blocks_read = 0; unsigned int bsize = sdp->sd_sb.sb_bsize, off; unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; unsigned int shift = PAGE_SHIFT - bsize_shift; unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; struct gfs2_journal_extent *je; int sz, ret = 0; struct bio *bio = NULL; struct page *page = NULL; bool done = false; errseq_t since; memset(head, 0, sizeof(*head)); if (list_empty(&jd->extent_list)) gfs2_map_journal_extents(sdp, jd); since = filemap_sample_wb_err(mapping); list_for_each_entry(je, &jd->extent_list, list) { u64 dblock = je->dblock; for (; block < je->lblock + je->blocks; block++, dblock++) { if (!page) { page = grab_cache_page(mapping, block >> shift); if (!page) { ret = -ENOMEM; done = true; goto out; } off = 0; } if (bio && (off || block < blocks_submitted + max_blocks)) { sector_t sector = dblock << sdp->sd_fsb2bb_shift; if (bio_end_sector(bio) == sector) { sz = bio_add_page(bio, page, bsize, off); if (sz == bsize) goto block_added; } if (off) { unsigned int blocks = (PAGE_SIZE - off) >> bsize_shift; bio = gfs2_chain_bio(bio, blocks); goto add_block_to_new_bio; } } if (bio) { blocks_submitted = block; submit_bio(bio); } bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); bio->bi_opf = REQ_OP_READ; add_block_to_new_bio: sz = bio_add_page(bio, page, bsize, off); BUG_ON(sz != bsize); block_added: off += bsize; if (off == PAGE_SIZE) page = NULL; if (blocks_submitted <= blocks_read + max_blocks) { /* Keep at least one bio in flight */ continue; } gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); blocks_read += PAGE_SIZE >> bsize_shift; if (done) goto out; /* found */ } } out: if (bio) 
submit_bio(bio); while (blocks_read < block) { gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done); blocks_read += PAGE_SIZE >> bsize_shift; } if (!ret) ret = filemap_check_wb_err(mapping, since); if (!keep_cache) truncate_inode_pages(mapping, 0); return ret; } static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, u32 ld_length, u32 ld_data1) { struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); struct gfs2_log_descriptor *ld = page_address(page); clear_page(ld); ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC); ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD); ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD); ld->ld_type = cpu_to_be32(ld_type); ld->ld_length = cpu_to_be32(ld_length); ld->ld_data1 = cpu_to_be32(ld_data1); ld->ld_data2 = 0; return page; } static void gfs2_check_magic(struct buffer_head *bh) { void *kaddr; __be32 *ptr; clear_buffer_escaped(bh); kaddr = kmap_local_page(bh->b_page); ptr = kaddr + bh_offset(bh); if (*ptr == cpu_to_be32(GFS2_MAGIC)) set_buffer_escaped(bh); kunmap_local(kaddr); } static int blocknr_cmp(void *priv, const struct list_head *a, const struct list_head *b) { struct gfs2_bufdata *bda, *bdb; bda = list_entry(a, struct gfs2_bufdata, bd_list); bdb = list_entry(b, struct gfs2_bufdata, bd_list); if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr) return -1; if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr) return 1; return 0; } static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, unsigned int total, struct list_head *blist, bool is_databuf) { struct gfs2_log_descriptor *ld; struct gfs2_bufdata *bd1 = NULL, *bd2; struct page *page; unsigned int num; unsigned n; __be64 *ptr; gfs2_log_lock(sdp); list_sort(NULL, blist, blocknr_cmp); bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); while(total) { num = total; if (total > limit) num = limit; gfs2_log_unlock(sdp); page = gfs2_get_log_desc(sdp, is_databuf ? GFS2_LOG_DESC_JDATA : GFS2_LOG_DESC_METADATA, num + 1, num); ld = page_address(page); gfs2_log_lock(sdp); ptr = (__be64 *)(ld + 1); n = 0; list_for_each_entry_continue(bd1, blist, bd_list) { *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); if (is_databuf) { gfs2_check_magic(bd1->bd_bh); *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 
1 : 0); } if (++n >= num) break; } gfs2_log_unlock(sdp); gfs2_log_write_page(sdp, page); gfs2_log_lock(sdp); n = 0; list_for_each_entry_continue(bd2, blist, bd_list) { get_bh(bd2->bd_bh); gfs2_log_unlock(sdp); lock_buffer(bd2->bd_bh); if (buffer_escaped(bd2->bd_bh)) { void *p; page = mempool_alloc(gfs2_page_pool, GFP_NOIO); p = page_address(page); memcpy_from_page(p, page, bh_offset(bd2->bd_bh), bd2->bd_bh->b_size); *(__be32 *)p = 0; clear_buffer_escaped(bd2->bd_bh); unlock_buffer(bd2->bd_bh); brelse(bd2->bd_bh); gfs2_log_write_page(sdp, page); } else { gfs2_log_write_bh(sdp, bd2->bd_bh); } gfs2_log_lock(sdp); if (++n >= num) break; } BUG_ON(total < num); total -= num; } gfs2_log_unlock(sdp); } static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */ unsigned int nbuf; if (tr == NULL) return; nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm; gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0); } static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head; struct gfs2_bufdata *bd; if (tr == NULL) return; head = &tr->tr_buf; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gfs2_unpin(sdp, bd->bd_bh, tr); } } static void buf_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { if (pass != 0) return; jd->jd_found_blocks = 0; jd->jd_replayed_blocks = 0; } #define obsolete_rgrp_replay \ "Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n" #define obsolete_rgrp_replay2 \ "busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n" static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log, u64 blkno) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_rgrpd *rgd; struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data; rgd = gfs2_blk2rgrpd(sdp, blkno, false); if (rgd && rgd->rd_addr == blkno && rgd->rd_bits && rgd->rd_bits->bi_bh) { fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno, jd->jd_jid, bh_log->b_blocknr); fs_info(sdp, obsolete_rgrp_replay2, buffer_busy(rgd->rd_bits->bi_bh) ? 
1 : 0, buffer_pinned(rgd->rd_bits->bi_bh), rgd->rd_igeneration, be64_to_cpu(jrgd->rg_igeneration)); gfs2_dump_glock(NULL, rgd->rd_gl, true); } } static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; int error = 0; if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA) return 0; gfs2_replay_incr_blk(jd, &start); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { blkno = be64_to_cpu(*ptr++); jd->jd_found_blocks++; if (gfs2_revoke_check(jd, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); if (gfs2_meta_check(sdp, bh_ip)) error = -EIO; else { struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh_ip->b_data; if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) obsolete_rgrp(jd, bh_log, blkno); mark_buffer_dirty(bh_ip); } brelse(bh_log); brelse(bh_ip); if (error) break; jd->jd_replayed_blocks++; } return error; } static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); } static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct gfs2_meta_header *mh; unsigned int offset; struct list_head *head = &sdp->sd_log_revokes; struct gfs2_bufdata *bd; struct page *page; unsigned int length; gfs2_flush_revokes(sdp); if (!sdp->sd_log_num_revoke) return; length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke); page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); offset = sizeof(struct gfs2_log_descriptor); list_for_each_entry(bd, head, bd_list) { sdp->sd_log_num_revoke--; if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { gfs2_log_write_page(sdp, page); page = mempool_alloc(gfs2_page_pool, GFP_NOIO); mh = page_address(page); clear_page(mh); mh->mh_magic = cpu_to_be32(GFS2_MAGIC); mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB); mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB); offset = sizeof(struct gfs2_meta_header); } *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno); offset += sizeof(u64); } gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); gfs2_log_write_page(sdp, page); } void gfs2_drain_revokes(struct gfs2_sbd *sdp) { struct list_head *head = &sdp->sd_log_revokes; struct gfs2_bufdata *bd; struct gfs2_glock *gl; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gl = bd->bd_gl; gfs2_glock_remove_revoke(gl); kmem_cache_free(gfs2_bufdata_cachep, bd); } } static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { gfs2_drain_revokes(sdp); } static void revoke_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { if (pass != 0) return; jd->jd_found_revokes = 0; jd->jd_replay_tail = head->lh_tail; } static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_sbd *sdp = 
GFS2_SB(jd->jd_inode); unsigned int blks = be32_to_cpu(ld->ld_length); unsigned int revokes = be32_to_cpu(ld->ld_data1); struct buffer_head *bh; unsigned int offset; u64 blkno; int first = 1; int error; if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE) return 0; offset = sizeof(struct gfs2_log_descriptor); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { error = gfs2_replay_read_block(jd, start, &bh); if (error) return error; if (!first) gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB); while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) { blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset)); error = gfs2_revoke_add(jd, blkno, start); if (error < 0) { brelse(bh); return error; } else if (error) jd->jd_found_revokes++; if (!--revokes) break; offset += sizeof(u64); } brelse(bh); offset = sizeof(struct gfs2_meta_header); first = 0; } return 0; } static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_revoke_clean(jd); return; } if (pass != 1) return; fs_info(sdp, "jid=%u: Found %u revoke tags\n", jd->jd_jid, jd->jd_found_revokes); gfs2_revoke_clean(jd); } /** * databuf_lo_before_commit - Scan the data buffers, writing as we go * @sdp: The filesystem * @tr: The system transaction being flushed */ static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { unsigned int limit = databuf_limit(sdp); unsigned int nbuf; if (tr == NULL) return; nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm; gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1); } static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; u64 esc; int error = 0; if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA) return 0; gfs2_replay_incr_blk(jd, &start); for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) { blkno = be64_to_cpu(*ptr++); esc = be64_to_cpu(*ptr++); jd->jd_found_blocks++; if (gfs2_revoke_check(jd, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); /* Unescape */ if (esc) { __be32 *eptr = (__be32 *)bh_ip->b_data; *eptr = cpu_to_be32(GFS2_MAGIC); } mark_buffer_dirty(bh_ip); brelse(bh_log); brelse(bh_ip); jd->jd_replayed_blocks++; } return error; } /* FIXME: sort out accounting for log blocks etc. */ static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; /* data sync? 
*/ gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); } static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head; struct gfs2_bufdata *bd; if (tr == NULL) return; head = &tr->tr_databuf; while (!list_empty(head)) { bd = list_first_entry(head, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gfs2_unpin(sdp, bd->bd_bh, tr); } } static const struct gfs2_log_operations gfs2_buf_lops = { .lo_before_commit = buf_lo_before_commit, .lo_after_commit = buf_lo_after_commit, .lo_before_scan = buf_lo_before_scan, .lo_scan_elements = buf_lo_scan_elements, .lo_after_scan = buf_lo_after_scan, .lo_name = "buf", }; static const struct gfs2_log_operations gfs2_revoke_lops = { .lo_before_commit = revoke_lo_before_commit, .lo_after_commit = revoke_lo_after_commit, .lo_before_scan = revoke_lo_before_scan, .lo_scan_elements = revoke_lo_scan_elements, .lo_after_scan = revoke_lo_after_scan, .lo_name = "revoke", }; static const struct gfs2_log_operations gfs2_databuf_lops = { .lo_before_commit = databuf_lo_before_commit, .lo_after_commit = databuf_lo_after_commit, .lo_scan_elements = databuf_lo_scan_elements, .lo_after_scan = databuf_lo_after_scan, .lo_name = "databuf", }; const struct gfs2_log_operations *gfs2_log_ops[] = { &gfs2_databuf_lops, &gfs2_buf_lops, &gfs2_revoke_lops, NULL, };
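One detail worth pulling out of the listing above: a journaled data block whose first four bytes happen to equal the GFS2 magic number is "escaped" before it reaches the log. gfs2_check_magic() flags such buffers, gfs2_before_commit() writes a copy with the first __be32 zeroed and records the escape flag in the descriptor, and databuf_lo_scan_elements() restores the magic during replay, so recovery never mistakes user data for a metadata header. Below is a standalone sketch of just that escape/unescape step; escape_block and unescape_block are hypothetical helpers, htonl stands in for cpu_to_be32, and 0x01161970 is the on-disk GFS2 magic as defined in the kernel's gfs2_ondisk.h.

#include <arpa/inet.h>   /* htonl stands in for cpu_to_be32 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GFS2_MAGIC 0x01161970u

/* Prepare the log copy of a data block; returns true if it had to be escaped.
 * Assumes bsize >= sizeof(uint32_t). */
static bool escape_block(uint8_t *log_copy, const uint8_t *data, size_t bsize)
{
        uint32_t first;

        memcpy(log_copy, data, bsize);
        memcpy(&first, data, sizeof(first));
        if (first == htonl(GFS2_MAGIC)) {
                memset(log_copy, 0, sizeof(uint32_t));  /* mirrors *(__be32 *)p = 0 */
                return true;
        }
        return false;
}

/* Replay side: put the magic back if the descriptor marked the block escaped. */
static void unescape_block(uint8_t *data, bool escaped)
{
        uint32_t magic = htonl(GFS2_MAGIC);

        if (escaped)
                memcpy(data, &magic, sizeof(magic));
}

int main(void)
{
        uint8_t data[16] = { 0x01, 0x16, 0x19, 0x70 };  /* block starting with the magic */
        uint8_t log_copy[16];
        bool esc = escape_block(log_copy, data, sizeof(data));

        unescape_block(log_copy, esc);                  /* replay restores the magic */
        printf("escaped=%d, restored=%d\n", esc,
               memcmp(log_copy, data, sizeof(data)) == 0);
        return 0;
}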
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015 HGST, a Western Digital Company.
*/ #include <linux/err.h> #include <linux/slab.h> #include <rdma/ib_verbs.h> #include "core_priv.h" #include <trace/events/rdma_core.h> /* Max size for shared CQ, may require tuning */ #define IB_MAX_SHARED_CQ_SZ 4096U /* # of WCs to poll for with a single call to ib_poll_cq */ #define IB_POLL_BATCH 16 #define IB_POLL_BATCH_DIRECT 8 /* # of WCs to iterate over before yielding */ #define IB_POLL_BUDGET_IRQ 256 #define IB_POLL_BUDGET_WORKQUEUE 65536 #define IB_POLL_FLAGS \ (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) static const struct dim_cq_moder rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = { {1, 0, 1, 0}, {1, 0, 4, 0}, {2, 0, 4, 0}, {2, 0, 8, 0}, {4, 0, 8, 0}, {16, 0, 8, 0}, {16, 0, 16, 0}, {32, 0, 16, 0}, {32, 0, 32, 0}, }; static void ib_cq_rdma_dim_work(struct work_struct *w) { struct dim *dim = container_of(w, struct dim, work); struct ib_cq *cq = dim->priv; u16 usec = rdma_dim_prof[dim->profile_ix].usec; u16 comps = rdma_dim_prof[dim->profile_ix].comps; dim->state = DIM_START_MEASURE; trace_cq_modify(cq, comps, usec); cq->device->ops.modify_cq(cq, comps, usec); } static void rdma_dim_init(struct ib_cq *cq) { struct dim *dim; if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim || cq->poll_ctx == IB_POLL_DIRECT) return; dim = kzalloc(sizeof(struct dim), GFP_KERNEL); if (!dim) return; dim->state = DIM_START_MEASURE; dim->tune_state = DIM_GOING_RIGHT; dim->profile_ix = RDMA_DIM_START_PROFILE; dim->priv = cq; cq->dim = dim; INIT_WORK(&dim->work, ib_cq_rdma_dim_work); } static void rdma_dim_destroy(struct ib_cq *cq) { if (!cq->dim) return; cancel_work_sync(&cq->dim->work); kfree(cq->dim); } static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) { int rc; rc = ib_poll_cq(cq, num_entries, wc); trace_cq_poll(cq, num_entries, rc); return rc; } static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, int batch) { int i, n, completed = 0; trace_cq_process(cq); /* * budget might be (-1) if the caller does not * want to bound this call, thus we need unsigned * minimum here. */ while ((n = __poll_cq(cq, min_t(u32, batch, budget - completed), wcs)) > 0) { for (i = 0; i < n; i++) { struct ib_wc *wc = &wcs[i]; if (wc->wr_cqe) wc->wr_cqe->done(cq, wc); else WARN_ON_ONCE(wc->status == IB_WC_SUCCESS); } completed += n; if (n != batch || (budget != -1 && completed >= budget)) break; } return completed; } /** * ib_process_cq_direct - process a CQ in caller context * @cq: CQ to process * @budget: number of CQEs to poll for * * This function is used to process all outstanding CQ entries. * It does not offload CQ processing to a different context and does * not ask for completion interrupts from the HCA. * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger * concurrent processing. * * Note: do not pass -1 as %budget unless it is guaranteed that the number * of completions that will be processed is small. 
*/ int ib_process_cq_direct(struct ib_cq *cq, int budget) { struct ib_wc wcs[IB_POLL_BATCH_DIRECT]; return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); } EXPORT_SYMBOL(ib_process_cq_direct); static void ib_cq_completion_direct(struct ib_cq *cq, void *private) { WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq); } static int ib_poll_handler(struct irq_poll *iop, int budget) { struct ib_cq *cq = container_of(iop, struct ib_cq, iop); struct dim *dim = cq->dim; int completed; completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); if (completed < budget) { irq_poll_complete(&cq->iop); if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) { trace_cq_reschedule(cq); irq_poll_sched(&cq->iop); } } if (dim) rdma_dim(dim, completed); return completed; } static void ib_cq_completion_softirq(struct ib_cq *cq, void *private) { trace_cq_schedule(cq); irq_poll_sched(&cq->iop); } static void ib_cq_poll_work(struct work_struct *work) { struct ib_cq *cq = container_of(work, struct ib_cq, work); int completed; completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, IB_POLL_BATCH); if (completed >= IB_POLL_BUDGET_WORKQUEUE || ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) queue_work(cq->comp_wq, &cq->work); else if (cq->dim) rdma_dim(cq->dim, completed); } static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) { trace_cq_schedule(cq); queue_work(cq->comp_wq, &cq->work); } /** * __ib_alloc_cq - allocate a completion queue * @dev: device to allocate the CQ for * @private: driver private data, accessible from cq->cq_context * @nr_cqe: number of CQEs to allocate * @comp_vector: HCA completion vectors for this CQ * @poll_ctx: context to poll the CQ from. * @caller: module owner name. * * This is the proper interface to allocate a CQ for in-kernel users. A * CQ allocated with this interface will automatically be polled from the * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id * to use this CQ abstraction. */ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx, const char *caller) { struct ib_cq_init_attr cq_attr = { .cqe = nr_cqe, .comp_vector = comp_vector, }; struct ib_cq *cq; int ret = -ENOMEM; cq = rdma_zalloc_drv_obj(dev, ib_cq); if (!cq) return ERR_PTR(ret); cq->device = dev; cq->cq_context = private; cq->poll_ctx = poll_ctx; atomic_set(&cq->usecnt, 0); cq->comp_vector = comp_vector; cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL); if (!cq->wc) goto out_free_cq; rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); rdma_restrack_set_name(&cq->res, caller); ret = dev->ops.create_cq(cq, &cq_attr, NULL); if (ret) goto out_free_wc; rdma_dim_init(cq); switch (cq->poll_ctx) { case IB_POLL_DIRECT: cq->comp_handler = ib_cq_completion_direct; break; case IB_POLL_SOFTIRQ: cq->comp_handler = ib_cq_completion_softirq; irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); break; case IB_POLL_WORKQUEUE: case IB_POLL_UNBOUND_WORKQUEUE: cq->comp_handler = ib_cq_completion_workqueue; INIT_WORK(&cq->work, ib_cq_poll_work); ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? 
ib_comp_wq : ib_comp_unbound_wq; break; default: ret = -EINVAL; goto out_destroy_cq; } rdma_restrack_add(&cq->res); trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx); return cq; out_destroy_cq: rdma_dim_destroy(cq); cq->device->ops.destroy_cq(cq, NULL); out_free_wc: rdma_restrack_put(&cq->res); kfree(cq->wc); out_free_cq: kfree(cq); trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret); return ERR_PTR(ret); } EXPORT_SYMBOL(__ib_alloc_cq); /** * __ib_alloc_cq_any - allocate a completion queue * @dev: device to allocate the CQ for * @private: driver private data, accessible from cq->cq_context * @nr_cqe: number of CQEs to allocate * @poll_ctx: context to poll the CQ from * @caller: module owner name * * Attempt to spread ULP Completion Queues over each device's interrupt * vectors. A simple best-effort mechanism is used. */ struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, int nr_cqe, enum ib_poll_context poll_ctx, const char *caller) { static atomic_t counter; int comp_vector = 0; if (dev->num_comp_vectors > 1) comp_vector = atomic_inc_return(&counter) % min_t(int, dev->num_comp_vectors, num_online_cpus()); return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, caller); } EXPORT_SYMBOL(__ib_alloc_cq_any); /** * ib_free_cq - free a completion queue * @cq: completion queue to free. */ void ib_free_cq(struct ib_cq *cq) { int ret; if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) return; if (WARN_ON_ONCE(cq->cqe_used)) return; switch (cq->poll_ctx) { case IB_POLL_DIRECT: break; case IB_POLL_SOFTIRQ: irq_poll_disable(&cq->iop); break; case IB_POLL_WORKQUEUE: case IB_POLL_UNBOUND_WORKQUEUE: cancel_work_sync(&cq->work); break; default: WARN_ON_ONCE(1); } rdma_dim_destroy(cq); trace_cq_free(cq); ret = cq->device->ops.destroy_cq(cq, NULL); WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); rdma_restrack_del(&cq->res); kfree(cq->wc); kfree(cq); } EXPORT_SYMBOL(ib_free_cq); void ib_cq_pool_cleanup(struct ib_device *dev) { struct ib_cq *cq, *n; unsigned int i; for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) { list_for_each_entry_safe(cq, n, &dev->cq_pools[i], pool_entry) { WARN_ON(cq->cqe_used); list_del(&cq->pool_entry); cq->shared = false; ib_free_cq(cq); } } } static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes, enum ib_poll_context poll_ctx) { LIST_HEAD(tmp_list); unsigned int nr_cqs, i; struct ib_cq *cq, *n; int ret; if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE); return -EINVAL; } /* * Allocate at least as many CQEs as requested, and otherwise * a reasonable batch size so that we can share CQs between * multiple users instead of allocating a larger number of CQs. */ nr_cqes = min_t(unsigned int, dev->attrs.max_cqe, max(nr_cqes, IB_MAX_SHARED_CQ_SZ)); nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus()); for (i = 0; i < nr_cqs; i++) { cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto out_free_cqs; } cq->shared = true; list_add_tail(&cq->pool_entry, &tmp_list); } spin_lock_irq(&dev->cq_pools_lock); list_splice(&tmp_list, &dev->cq_pools[poll_ctx]); spin_unlock_irq(&dev->cq_pools_lock); return 0; out_free_cqs: list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { cq->shared = false; ib_free_cq(cq); } return ret; } /** * ib_cq_pool_get() - Find the least used completion queue that matches * a given cpu hint (or least used for wild card affinity) and fits * nr_cqe. 
* @dev: rdma device * @nr_cqe: number of needed cqe entries * @comp_vector_hint: completion vector hint (-1) for the driver to assign * a comp vector based on internal counter * @poll_ctx: cq polling context * * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and * claim entries in it for us. In case there is no available cq, allocate * a new cq with the requirements and add it to the device pool. * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value * for @poll_ctx. */ struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, int comp_vector_hint, enum ib_poll_context poll_ctx) { static unsigned int default_comp_vector; unsigned int vector, num_comp_vectors; struct ib_cq *cq, *found = NULL; int ret; if (poll_ctx > IB_POLL_LAST_POOL_TYPE) { WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE); return ERR_PTR(-EINVAL); } num_comp_vectors = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus()); /* Project the affinty to the device completion vector range */ if (comp_vector_hint < 0) { comp_vector_hint = (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors; WRITE_ONCE(default_comp_vector, comp_vector_hint); } vector = comp_vector_hint % num_comp_vectors; /* * Find the least used CQ with correct affinity and * enough free CQ entries */ while (!found) { spin_lock_irq(&dev->cq_pools_lock); list_for_each_entry(cq, &dev->cq_pools[poll_ctx], pool_entry) { /* * Check to see if we have found a CQ with the * correct completion vector */ if (vector != cq->comp_vector) continue; if (cq->cqe_used + nr_cqe > cq->cqe) continue; found = cq; break; } if (found) { found->cqe_used += nr_cqe; spin_unlock_irq(&dev->cq_pools_lock); return found; } spin_unlock_irq(&dev->cq_pools_lock); /* * Didn't find a match or ran out of CQs in the device * pool, allocate a new array of CQs. */ ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx); if (ret) return ERR_PTR(ret); } return found; } EXPORT_SYMBOL(ib_cq_pool_get); /** * ib_cq_pool_put - Return a CQ taken from a shared pool. * @cq: The CQ to return. * @nr_cqe: The max number of cqes that the user had requested. */ void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe) { if (WARN_ON_ONCE(nr_cqe > cq->cqe_used)) return; spin_lock_irq(&cq->device->cq_pools_lock); cq->cqe_used -= nr_cqe; spin_unlock_irq(&cq->device->cq_pools_lock); } EXPORT_SYMBOL(ib_cq_pool_put);
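__ib_alloc_cq_any() above spreads completion queues across the device's completion vectors with nothing more than a shared counter folded into the smaller of the vector count and the number of online CPUs. A hypothetical userspace rendering of that selection (pick_comp_vector and its parameters are illustrative names; the kernel uses atomic_inc_return() on a static atomic_t):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint counter = 0;

/* Round-robin a new CQ onto one of the usable completion vectors. */
static unsigned int pick_comp_vector(unsigned int num_comp_vectors,
                                     unsigned int num_online_cpus)
{
        unsigned int usable = num_comp_vectors < num_online_cpus ?
                              num_comp_vectors : num_online_cpus;

        if (num_comp_vectors <= 1)
                return 0;
        return atomic_fetch_add(&counter, 1) % usable;
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("CQ %d -> vector %u\n", i, pick_comp_vector(4, 8));
        return 0;
}

ib_cq_pool_get() applies the same bound when projecting a caller's affinity hint onto the vector range before scanning the shared pool for a CQ with enough free entries.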
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       folio_lock
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           vma_start_write
 *             mapping->i_mmap_rwsem
 *               anon_vma->rwsem
 *                 mm->page_table_lock or pte_lock
 *                   swap_lock (in swap_duplicate, swap_info_get)
 *                     mmlist_lock (in mmput, drain_mmlist and others)
 *                     mapping->private_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                       sb_lock (within inode_lock in fs/fs-writeback.c)
 *                       i_pages lock (widely used, in set_page_dirty,
 *                                 in arch-dependent flush_dcache_mmap_lock,
 *                                 within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() takes locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *   vma_lock (hugetlb specific lock for pmd_sharing)
 *   mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *     folio_lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
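/*
 * Editor's illustrative sketch (not part of the original mm/rmap.c): a
 * hypothetical helper that spells out the lock nesting documented at the
 * top of this file -- mmap_lock, then mapping->i_mmap_rwsem, then
 * anon_vma->rwsem, then mm->page_table_lock -- using only locking helpers
 * that already exist in the kernel. It is compiled out and intended purely
 * as a reading aid; real callers take only the subset of locks they need.
 */
#if 0
static void example_lock_nesting(struct mm_struct *mm,
				 struct address_space *mapping,
				 struct anon_vma *anon_vma)
{
	mmap_read_lock(mm);			/* mm->mmap_lock */
	i_mmap_lock_read(mapping);		/* mapping->i_mmap_rwsem */
	anon_vma_lock_read(anon_vma);		/* anon_vma->root->rwsem */
	spin_lock(&mm->page_table_lock);	/* page table lock comes last */

	/* ... work that needs the full nest would go here ... */

	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_read(anon_vma);
	i_mmap_unlock_read(mapping);
	mmap_read_unlock(mm);
}
#endif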
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	mmap_assert_locked(mm);
	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
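/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * caller pattern for the helper above. Fault paths are expected to go
 * through the anon_vma_prepare() fast path (a static inline in
 * linux/rmap.h that only calls __anon_vma_prepare() when vma->anon_vma is
 * still NULL) before installing the first anonymous page into a VMA.
 * The function name below is hypothetical; it is compiled out.
 */
#if 0
static vm_fault_t example_prepare_for_anon_fault(struct vm_area_struct *vma)
{
	/* Cheap when vma->anon_vma is already set; may sleep otherwise. */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;

	return 0;
}
#endif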
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root,
						  struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;

	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
 * call, we can identify this case by checking (!dst->anon_vma &&
 * src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse existing anon_vma which has no vmas and only one child anon_vma.
 * This prevents degradation of anon_vma hierarchy to endless linear chain in
 * case of constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there was no alive vma, thus rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * vma would still be needed after unlink, and anon_vma will
		 * be prepared when handling a fault.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against folio_remove_rmap_*()
 * the best this function can do is return a refcount increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 *
 * NOTE: the caller should normally hold folio lock when calling this.
 * If not, the caller needs to double check the anon_vma didn't change after
 * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it
 * concurrently without folio lock protection). See folio_lock_anon_vma_read()
 * which has already covered that, and comment above remap_pages().
 */
struct anon_vma *folio_get_anon_vma(const struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * on !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

retry:
	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * folio_move_anon_rmap() might have changed the anon_vma as we
		 * might not hold the folio lock here.
		 */
		if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
			     anon_mapping)) {
			up_read(&root_anon_vma->rwsem);
			rcu_read_unlock();
			goto retry;
		}

		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	/*
	 * folio_move_anon_rmap() might have changed the anon_vma as we might
	 * not hold the folio lock here.
	 */
	if (unlikely((unsigned long)READ_ONCE(folio->mapping) !=
		     anon_mapping)) {
		anon_vma_unlock_read(anon_vma);
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto retry;
	}

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}
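/*
 * Editor's illustrative sketch (not part of the original file): the rules
 * documented above boil down to "pin or lock the anon_vma, then only trust
 * the interval tree for as long as the folio stays mapped". The hypothetical
 * walker below mirrors the shape of rmap_walk_anon(), using only helpers
 * defined or declared for this file (folio_lock_anon_vma_read(),
 * anon_vma_interval_tree_foreach(), vma_address()). It is compiled out.
 */
#if 0
static void example_walk_anon(struct folio *folio)
{
	pgoff_t pgoff_start = folio_pgoff(folio);
	pgoff_t pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	anon_vma = folio_lock_anon_vma_read(folio, NULL);
	if (!anon_vma)
		return;		/* unmapped, or remapped under us */

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
				       pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(vma, pgoff_start,
						    folio_nr_pages(folio));

		/* ... inspect the mapping at (vma, address) here ... */
	}

	anon_vma_unlock_read(anon_vma);
}
#endif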
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH

/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;
	bool writable = pte_dirty(pteval);

	if (!pte_accessible(mm, pteval))
		return;

	arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	return arch_tlbbatch_should_defer(mm);
}
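/*
 * Editor's illustrative sketch (not part of the original file): how the
 * batching helpers above are meant to be used by an unmap path. Instead of
 * a ptep_clear_flush() per PTE, the PTE is cleared, the flush is queued on
 * the per-task batch, and the caller flushes once (try_to_unmap_flush() or
 * try_to_unmap_flush_dirty()) before IO is started on or the pages are
 * freed. The function name below is hypothetical and the code is compiled
 * out; try_to_unmap_one() later in this file is the real user.
 */
#if 0
static void example_batched_unmap_step(struct vm_area_struct *vma,
				       unsigned long address, pte_t *ptep,
				       enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pteval;

	if (should_defer_flush(mm, flags)) {
		/* Clear the PTE and remember that this mm needs a flush. */
		pteval = ptep_get_and_clear(mm, address, ptep);
		set_tlb_ubc_flush_pending(mm, pteval, address);
	} else {
		/* Fall back to an immediate flush of this address. */
		pteval = ptep_clear_flush(vma, address, ptep);
	}

	/*
	 * Later, before any IO is started on the unmapped pages:
	 *	try_to_unmap_flush_dirty();
	 */
}
#endif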
/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		arch_flush_tlb_batched_pending(mm);
		/*
		 * If the new TLB flushing is pending during flushing, leave
		 * mm->tlb_flush_batched as is, to avoid losing flushing.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval,
				      unsigned long uaddr)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/**
 * page_address_in_vma - The virtual address of a page in this VMA.
 * @folio: The folio containing the page.
 * @page: The page within the folio.
 * @vma: The VMA we need to know the address in.
 *
 * Calculates the user virtual address of this page in the specified VMA.
 * It is the caller's responsibility to check the page is actually
 * within the VMA.  There may not currently be a PTE pointing at this
 * page, but if a page fault occurs at this address, this is the page
 * which will be accessed.
 *
 * Context: Caller should hold a reference to the folio.  Caller should
 * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the
 * VMA from being altered.
 *
 * Return: The virtual address corresponding to this page in the VMA.
 */
unsigned long page_address_in_vma(const struct folio *folio,
		const struct page *page, const struct vm_area_struct *vma)
{
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	/* KSM folios don't reach here because of the !page__anon_vma check */
	return vma_address(vma, page_pgoff(folio, page), 1);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 * represents.
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};

/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;
	unsigned long start = address, ptes = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			if (!folio_test_large(folio) || !pvmw.pte) {
				/* Restore the mlock which got missed */
				mlock_vma_folio(folio, vma);
				page_vma_mapped_walk_done(&pvmw);
				pra->vm_flags |= VM_LOCKED;
				return false;	/* To break the loop */
			}
			/*
			 * For large folio fully mapped to VMA, will
			 * be handled after the pvmw loop.
* * For large folio cross VMA boundaries, it's * expected to be picked by page reclaim. But * should skip reference of pages which are in * the range of VM_LOCKED vma. As page reclaim * should just count the reference of pages out * the range of VM_LOCKED vma. */ ptes++; pra->mapcount--; continue; } /* * Skip the non-shared swapbacked folio mapped solely by * the exiting or OOM-reaped process. This avoids redundant * swap-out followed by an immediate unmap. */ if ((!atomic_read(&vma->vm_mm->mm_users) || check_stable_address_space(vma->vm_mm)) && folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_likely_mapped_shared(folio)) { pra->referenced = -1; page_vma_mapped_walk_done(&pvmw); return false; } if (lru_gen_enabled() && pvmw.pte) { if (lru_gen_look_around(&pvmw)) referenced++; } else if (pvmw.pte) { if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) referenced++; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd)) referenced++; } else { /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); } pra->mapcount--; } if ((vma->vm_flags & VM_LOCKED) && folio_test_large(folio) && folio_within_vma(folio, vma)) { unsigned long s_align, e_align; s_align = ALIGN_DOWN(start, PMD_SIZE); e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); /* folio doesn't cross page table boundary and fully mapped */ if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } } if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) referenced++; if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; } if (!pra->mapcount) return false; /* To break the loop */ return true; } static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) { struct folio_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; /* * Ignore references from this mapping if it has no recency. If the * folio has been used in another mapping, we will catch it; if this * other mapping is already gone, the unmap path will have set the * referenced flag or activated the folio in zap_pte_range(). */ if (!vma_has_recency(vma)) return true; /* * If we are reclaiming on behalf of a cgroup, skip counting on behalf * of references from different cgroups. */ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) return true; return false; } /** * folio_referenced() - Test if the folio was referenced. * @folio: The folio to test. * @is_locked: Caller holds lock on the folio. * @memcg: target memory cgroup * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * * Quick test_and_clear_referenced for all mappings of a folio, * * Return: The number of mappings which referenced the folio. Return -1 if * the function bailed out due to rmap lock contention. 
*/ int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { bool we_locked = false; struct folio_referenced_arg pra = { .mapcount = folio_mapcount(folio), .memcg = memcg, }; struct rmap_walk_control rwc = { .rmap_one = folio_referenced_one, .arg = (void *)&pra, .anon_lock = folio_lock_anon_vma_read, .try_lock = true, .invalid_vma = invalid_folio_referenced_vma, }; *vm_flags = 0; if (!pra.mapcount) return 0; if (!folio_raw_mapping(folio)) return 0; if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { we_locked = folio_trylock(folio); if (!we_locked) return 1; } rmap_walk(folio, &rwc); *vm_flags = pra.vm_flags; if (we_locked) folio_unlock(folio); return rwc.contended ? -1 : pra.referenced; } static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) { int cleaned = 0; struct vm_area_struct *vma = pvmw->vma; struct mmu_notifier_range range; unsigned long address = pvmw->address; /* * We have to assume the worse case ie pmd for invalidation. Note that * the folio can not be freed from this function. */ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, vma->vm_mm, address, vma_address_end(pvmw)); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(pvmw)) { int ret = 0; address = pvmw->address; if (pvmw->pte) { pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); if (!pte_dirty(entry) && !pte_write(entry)) continue; flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(vma->vm_mm, address, pte, entry); ret = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd = pvmw->pmd; pmd_t entry; if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) continue; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); ret = 1; #else /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); #endif } if (ret) cleaned++; } mmu_notifier_invalidate_range_end(&range); return cleaned; } static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); int *cleaned = arg; *cleaned += page_vma_mkclean_one(&pvmw); return true; } static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) return false; return true; } int folio_mkclean(struct folio *folio) { int cleaned = 0; struct address_space *mapping; struct rmap_walk_control rwc = { .arg = (void *)&cleaned, .rmap_one = page_mkclean_one, .invalid_vma = invalid_mkclean_vma, }; BUG_ON(!folio_test_locked(folio)); if (!folio_mapped(folio)) return 0; mapping = folio_mapping(folio); if (!mapping) return 0; rmap_walk(folio, &rwc); return cleaned; } EXPORT_SYMBOL_GPL(folio_mkclean); /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) * within the @vma of shared mappings. And since clean PTEs * should also be readonly, write protects them too. * @pfn: start pfn. * @nr_pages: number of physically contiguous pages srarting with @pfn. * @pgoff: page offset that the @pfn mapped with. * @vma: vma that @pfn mapped within. * * Returns the number of cleaned PTEs (including PMDs). 
*/ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { .pfn = pfn, .nr_pages = nr_pages, .pgoff = pgoff, .vma = vma, .flags = PVMW_SYNC, }; if (invalid_mkclean_vma(vma, NULL)) return 0; pvmw.address = vma_address(vma, pgoff, nr_pages); VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); return page_vma_mkclean_one(&pvmw); } static __always_inline unsigned int __folio_add_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level, int *nr_pmdmapped) { atomic_t *mapped = &folio->_nr_pages_mapped; const int orig_nr_pages = nr_pages; int first = 0, nr = 0; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: if (!folio_test_large(folio)) { nr = atomic_inc_and_test(&folio->_mapcount); break; } do { first += atomic_inc_and_test(&page->_mapcount); } while (page++, --nr_pages > 0); if (first && atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) nr = first; atomic_add(orig_nr_pages, &folio->_large_mapcount); break; case RMAP_LEVEL_PMD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { *nr_pmdmapped = folio_nr_pages(folio); nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? */ if (unlikely(nr < 0)) nr = 0; } else { /* Raced ahead of a remove of ENTIRELY_MAPPED */ nr = 0; } } atomic_inc(&folio->_large_mapcount); break; } return nr; } /** * folio_move_anon_rmap - move a folio to our anon_vma * @folio: The folio to move to our anon_vma * @vma: The vma the folio belongs to * * When a folio belongs exclusively to one process after a COW event, * that folio can be moved into the anon_vma that belongs to just that * process, so the rmap code will not search the parent or sibling processes. */ void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); anon_vma += PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written * simultaneously, so a concurrent reader (eg folio_referenced()'s * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); } /** * __folio_set_anon - set up a new anonymous rmap for a folio * @folio: The folio to set up the new anonymous rmap for. * @vma: VM area to add the folio to. * @address: User virtual address of the mapping * @exclusive: Whether the folio is exclusive to the process. */ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); /* * If the folio isn't exclusive to this vma, we must use the _oldest_ * possible anon_vma for the folio mapping! */ if (!exclusive) anon_vma = anon_vma->root; /* * page_idle does a lockless/optimistic rmap scan on folio->mapping. * Make sure the compiler doesn't split the stores of anon_vma and * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code * could mistake the mapping for a struct address_space and crash. */ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); } /** * __page_check_anon_rmap - sanity check anonymous rmap addition * @folio: The folio containing @page. 
* @page: the page to check the mapping of * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */ static void __page_check_anon_rmap(const struct folio *folio, const struct page *page, struct vm_area_struct *vma, unsigned long address) { /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * * We have exclusion against folio_add_anon_rmap_*() because the caller * always holds the page locked. * * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked * over the call to folio_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), page); } static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) { int idx; if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; __lruvec_stat_mod_folio(folio, idx, nr); } if (nr_pmdmapped) { if (folio_test_anon(folio)) { idx = NR_ANON_THPS; __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); } else { /* NR_*_PMDMAPPED are not maintained per-memcg */ idx = folio_test_swapbacked(folio) ? NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED; __mod_node_page_state(folio_pgdat(folio), idx, nr_pmdmapped); } } } static __always_inline void __folio_add_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags, enum rmap_level level) { int i, nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (likely(!folio_test_ksm(folio))) __page_check_anon_rmap(folio, page, vma, address); __folio_mod_stat(folio, nr, nr_pmdmapped); if (flags & RMAP_EXCLUSIVE) { switch (level) { case RMAP_LEVEL_PTE: for (i = 0; i < nr_pages; i++) SetPageAnonExclusive(page + i); break; case RMAP_LEVEL_PMD: SetPageAnonExclusive(page); break; } } for (i = 0; i < nr_pages; i++) { struct page *cur_page = page + i; /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && PageAnonExclusive(cur_page), folio); } /* * For large folio, only mlock it if it's fully mapped to VMA. It's * not easy to check whether the large folio is fully mapped to VMA * here. Only mlock normal 4K folio and leave page reclaim to handle * large folio. */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages which will be mapped * @vma: The vm area in which the mappings are added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + nr_pages) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, * and to ensure that an anon folio is not being upgraded racily to a KSM folio * (but KSM folios are never downgraded). 
*/ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, RMAP_LEVEL_PTE); } /** * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting. */ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /** * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * @flags: The rmap flags * * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. * The folio doesn't necessarily need to be locked while it's exclusive * unless two threads map it concurrently. However, the folio must be * locked if it's shared. * * If the folio is pmd-mappable, it is accounted as a THP. */ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { const int nr = folio_nr_pages(folio); const bool exclusive = flags & RMAP_EXCLUSIVE; int nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); VM_BUG_ON_VMA(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end, vma); /* * VM_DROPPABLE mappings don't swap; instead they're just dropped when * under memory pressure. 
*/ if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) __folio_set_swapbacked(folio); __folio_set_anon(folio, vma, address, exclusive); if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); if (exclusive) SetPageAnonExclusive(&folio->page); } else if (!folio_test_pmd_mappable(folio)) { int i; for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); /* increment count (starts at -1) */ atomic_set(&page->_mapcount, 0); if (exclusive) SetPageAnonExclusive(page); } /* increment count (starts at -1) */ atomic_set(&folio->_large_mapcount, nr - 1); atomic_set(&folio->_nr_pages_mapped, nr); } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); /* increment count (starts at -1) */ atomic_set(&folio->_large_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); if (exclusive) SetPageAnonExclusive(&folio->page); nr_pmdmapped = nr; } __folio_mod_stat(folio, nr, nr_pmdmapped); mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } static __always_inline void __folio_add_file_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { int nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); __folio_mod_stat(folio, nr, nr_pmdmapped); /* See comments in folio_add_anon_rmap_*() */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages that will be mapped using PTEs * @vma: The vm area in which the mappings are added * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma) { __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. 
*/ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { atomic_t *mapped = &folio->_nr_pages_mapped; int last = 0, nr = 0, nr_pmdmapped = 0; bool partially_mapped = false; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: if (!folio_test_large(folio)) { nr = atomic_add_negative(-1, &folio->_mapcount); break; } atomic_sub(nr_pages, &folio->_large_mapcount); do { last += atomic_add_negative(-1, &page->_mapcount); } while (page++, --nr_pages > 0); if (last && atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED) nr = last; partially_mapped = nr && atomic_read(mapped); break; case RMAP_LEVEL_PMD: atomic_dec(&folio->_large_mapcount); last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } } partially_mapped = nr && nr < nr_pmdmapped; break; } /* * Queue anon large folio for deferred split if at least one page of * the folio is unmapped and at least one page is still mapped. * * Check partially_mapped first to ensure it is a large folio. */ if (partially_mapped && folio_test_anon(folio) && !folio_test_partially_mapped(folio)) deferred_split_folio(folio, true); __folio_mod_stat(folio, -nr, -nr_pmdmapped); /* * It would be tidy to reset folio_test_anon mapping when fully * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. */ munlock_vma_folio(folio, vma); } /** * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio * @folio: The folio to remove the mappings from * @page: The first page to remove * @nr_pages: The number of pages that will be removed from the mapping * @vma: The vm area from which the mappings are removed * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ void folio_remove_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma) { __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio * @folio: The folio to remove the mapping from * @page: The first page to remove * @vma: The vm area from which the mapping is removed * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. 
*/ void folio_remove_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /* * @arg: enum ttu_flags will be passed to this argument */ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; unsigned long hsz = 0; /* * When racing against e.g. zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_unmap() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the folio can not be freed in this function as call of * try_to_unmap() must hold a reference on the folio. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* We need the huge page size for set_huge_pte_at() */ hsz = huge_page_size(hstate_vma(vma)); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* * If the folio is in an mlock()d vma, we must not swap it out. */ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); goto walk_abort; } if (!pvmw.pte) { if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) goto walk_done; if (flags & TTU_SPLIT_HUGE_PMD) { /* * We temporarily have to drop the PTL and * restart so we can process the PTE-mapped THP. */ split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, false, folio); flags &= ~TTU_SPLIT_HUGE_PMD; page_vma_mapped_walk_restart(&pvmw); continue; } } /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); pfn = pte_pfn(ptep_get(pvmw.pte)); subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * The try_to_unmap() is only passed a hugetlb page * in the case where the hugetlb page is poisoned. */ VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and fail * if unsuccessful. 
*/ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) goto walk_abort; if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. */ goto walk_done; } hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* * Now the pte is cleared. If this pte was uffd-wp armed, * we may want to replace a none pte with a marker pte if * it's file-backed, so we don't lose the tracking info. */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(folio)); } else if (folio_test_anon(folio)) { swp_entry_t entry = page_swap_entry(subpage); pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ if (unlikely(folio_test_swapbacked(folio) != folio_test_swapcache(folio))) { WARN_ON_ONCE(1); goto walk_abort; } /* MADV_FREE page check */ if (!folio_test_swapbacked(folio)) { int ref_count, map_count; /* * Synchronize with gup_pte_range(): * - clear PTE; barrier; read refcount * - inc refcount; barrier; read PTE */ smp_mb(); ref_count = folio_ref_count(folio); map_count = folio_mapcount(folio); /* * Order reads for page refcount and dirty flag * (see comments in __remove_mapping()). */ smp_rmb(); /* * The only page refs must be one from isolation * plus the rmap(s) (dropped by discard:). */ if (ref_count == 1 + map_count && (!folio_test_dirty(folio) || /* * Unlike MADV_FREE mappings, VM_DROPPABLE * ones can be dropped even if they've * been dirtied. */ (vma->vm_flags & VM_DROPPABLE))) { dec_mm_counter(mm, MM_ANONPAGES); goto discard; } /* * If the folio was redirtied, it cannot be * discarded. Remap the page to page table. 
*/ set_pte_at(mm, address, pvmw.pte, pteval); /* * Unlike MADV_FREE mappings, VM_DROPPABLE ones * never get swap backed on failure to drop. */ if (!(vma->vm_flags & VM_DROPPABLE)) folio_set_swapbacked(folio); goto walk_abort; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } /* See folio_try_share_anon_rmap(): clear PTE first. */ if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, subpage)) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); goto walk_abort; } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } dec_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_SWAPENTS); swp_pte = swp_entry_to_pte(entry); if (anon_exclusive) swp_pte = pte_swp_mkexclusive(swp_pte); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); } else { /* * This is a locked file-backed folio, * so it cannot be removed from the page * cache and replaced by a new folio before * mmu_notifier_invalidate_range_end, so no * concurrent thread might update its page table * to point at a new folio while a device is * still using this folio. * * See Documentation/mm/mmu_notifier.rst */ dec_mm_counter(mm, mm_counter_file(folio)); } discard: if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); continue; walk_abort: ret = false; walk_done: page_vma_mapped_walk_done(&pvmw); break; } mmu_notifier_invalidate_range_end(&range); return ret; } static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) { return vma_is_temporary_stack(vma); } static int folio_not_mapped(struct folio *folio) { return !folio_mapped(folio); } /** * try_to_unmap - Try to remove all page table mappings to a folio. * @folio: The folio to unmap. * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * folio. It is the caller's responsibility to check if the folio is * still mapped if needed (use TTU_SYNC to prevent accounting races). * * Context: Caller must hold the folio lock. */ void try_to_unmap(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } /* * @arg: enum ttu_flags will be passed to this argument. * * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs * containing migration entries. */ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; unsigned long hsz = 0; /* * When racing against e.g. 
zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_migrate() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; /* * unmap_page() in mm/huge_memory.c is the only user of migration with * TTU_SPLIT_HUGE_PMD and it wants to freeze. */ if (flags & TTU_SPLIT_HUGE_PMD) split_huge_pmd_address(vma, address, true, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* We need the huge page size for set_huge_pte_at() */ hsz = huge_page_size(hstate_vma(vma)); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { subpage = folio_page(folio, pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || !folio_test_pmd_mappable(folio), folio); if (set_pmd_migration_entry(&pvmw, subpage)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } continue; } #endif /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); pfn = pte_pfn(ptep_get(pvmw.pte)); if (folio_is_zone_device(folio)) { /* * Our PTE is a non-present device exclusive entry and * calculating the subpage as for the common case would * result in an invalid pointer. * * Since only PAGE_SIZE pages can currently be * migrated, just set it to page. This will need to be * changed when hugepage migrations to device private * memory are supported. */ VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); subpage = &folio->page; } else { subpage = folio_page(folio, pfn - folio_pfn(folio)); } address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and * fail if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) { page_vma_mapped_walk_done(&pvmw); ret = false; break; } if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. 
*/ page_vma_mapped_walk_done(&pvmw); break; } hugetlb_vma_unlock_write(vma); } /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (folio_is_device_private(folio)) { unsigned long pfn = folio_pfn(folio); swp_entry_t entry; pte_t swp_pte; if (anon_exclusive) WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, subpage)); /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ entry = pte_to_swp_entry(pteval); if (is_writable_device_private_entry(entry)) entry = make_writable_migration_entry(pfn); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry(pfn); else entry = make_readable_migration_entry(pfn); swp_pte = swp_entry_to_pte(entry); /* * pteval maps a zone device page and is therefore * a swap pte. */ if (pte_swp_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_swp_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); trace_set_migration_pte(pvmw.address, pte_val(swp_pte), folio_order(folio)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(folio)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(folio)); } else { swp_entry_t entry; pte_t swp_pte; if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); else set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && !anon_exclusive, subpage); /* See folio_try_share_anon_rmap_pte(): clear PTE first. 
*/ if (folio_test_hugetlb(folio)) { if (anon_exclusive && hugetlb_try_share_anon_rmap(folio)) { set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); ret = false; page_vma_mapped_walk_done(&pvmw); break; } } else if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, subpage)) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ if (pte_write(pteval)) entry = make_writable_migration_entry( page_to_pfn(subpage)); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry( page_to_pfn(subpage)); else entry = make_readable_migration_entry( page_to_pfn(subpage)); if (pte_young(pteval)) entry = make_migration_entry_young(entry); if (pte_dirty(pteval)) entry = make_migration_entry_dirty(entry); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, swp_pte, hsz); else set_pte_at(mm, address, pvmw.pte, swp_pte); trace_set_migration_pte(address, pte_val(swp_pte), folio_order(folio)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * try_to_migrate - try to replace all page table mappings with swap entries * @folio: the folio to replace page table entries for * @flags: action and flags * * Tries to remove all the page table entries which are mapping this folio and * replace them with special swap entries. Caller must hold the folio lock. */ void try_to_migrate(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_migrate_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; /* * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. */ if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC | TTU_BATCH_FLUSH))) return; if (folio_is_zone_device(folio) && (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) return; /* * During exec, a temporary VMA is setup and later moved. * The VMA is moved under the anon_vma lock but not the * page tables leading to a race where migration cannot * find the migration ptes. Rather than increasing the * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. 
*/ if (!folio_test_ksm(folio) && folio_test_anon(folio)) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } #ifdef CONFIG_DEVICE_PRIVATE struct make_exclusive_args { struct mm_struct *mm; unsigned long address; void *owner; bool valid; }; static bool page_make_device_exclusive_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *priv) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); struct make_exclusive_args *args = priv; pte_t pteval; struct page *subpage; bool ret = true; struct mmu_notifier_range range; swp_entry_t entry; pte_t swp_pte; pte_t ptent; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, address, min(vma->vm_end, address + folio_size(folio)), args->owner); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); ptent = ptep_get(pvmw.pte); if (!pte_present(ptent)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } subpage = folio_page(folio, pte_pfn(ptent) - folio_pfn(folio)); address = pvmw.address; /* Nuke the page table entry. */ flush_cache_page(vma, address, pte_pfn(ptent)); pteval = ptep_clear_flush(vma, address, pvmw.pte); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* * Check that our target page is still mapped at the expected * address. */ if (args->mm == mm && args->address == address && pte_write(pteval)) args->valid = true; /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ if (pte_write(pteval)) entry = make_writable_device_exclusive_entry( page_to_pfn(subpage)); else entry = make_readable_device_exclusive_entry( page_to_pfn(subpage)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); /* * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ folio_remove_rmap_pte(folio, subpage, vma); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * folio_make_device_exclusive - Mark the folio exclusively owned by a device. * @folio: The folio to replace page table entries for. * @mm: The mm_struct where the folio is expected to be mapped. * @address: Address where the folio is expected to be mapped. * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks * * Tries to remove all the page table entries which are mapping this * folio and replace them with special device exclusive swap entries to * grant a device exclusive access to the folio. * * Context: Caller must hold the folio lock. * Return: false if the page is still mapped, or if it could not be unmapped * from the expected address. Otherwise returns true (success). */ static bool folio_make_device_exclusive(struct folio *folio, struct mm_struct *mm, unsigned long address, void *owner) { struct make_exclusive_args args = { .mm = mm, .address = address, .owner = owner, .valid = false, }; struct rmap_walk_control rwc = { .rmap_one = page_make_device_exclusive_one, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, .arg = &args, }; /* * Restrict to anonymous folios for now to avoid potential writeback * issues. 
*/ if (!folio_test_anon(folio)) return false; rmap_walk(folio, &rwc); return args.valid && !folio_mapcount(folio); } /** * make_device_exclusive_range() - Mark a range for exclusive use by a device * @mm: mm_struct of associated target process * @start: start of the region to mark for exclusive device access * @end: end address of region * @pages: returns the pages which were successfully marked for exclusive access * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering * * Returns: number of pages found in the range by GUP. A page is marked for * exclusive access only if the page pointer is non-NULL. * * This function finds ptes mapping page(s) to the given address range, locks * them and replaces mappings with special swap entries preventing userspace CPU * access. On fault these entries are replaced with the original mapping after * calling MMU notifiers. * * A driver using this to program access from a device must use a mmu notifier * critical section to hold a device specific lock during programming. Once * programming is complete it should drop the page lock and reference after * which point CPU access to the page will revoke the exclusive access. */ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct page **pages, void *owner) { long npages = (end - start) >> PAGE_SHIFT; long i; npages = get_user_pages_remote(mm, start, npages, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, pages, NULL); if (npages < 0) return npages; for (i = 0; i < npages; i++, start += PAGE_SIZE) { struct folio *folio = page_folio(pages[i]); if (PageTail(pages[i]) || !folio_trylock(folio)) { folio_put(folio); pages[i] = NULL; continue; } if (!folio_make_device_exclusive(folio, mm, start, owner)) { folio_unlock(folio); folio_put(folio); pages[i] = NULL; } } return npages; } EXPORT_SYMBOL_GPL(make_device_exclusive_range); #endif void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); } static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; if (rwc->anon_lock) return rwc->anon_lock(folio, rwc); /* * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_lock. Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing */ anon_vma = folio_anon_vma(folio); if (!anon_vma) return NULL; if (anon_vma_trylock_read(anon_vma)) goto out; if (rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } anon_vma_lock_read(anon_vma); out: return anon_vma; } /* * rmap_walk_anon - do something to anonymous page using the object-based * rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma * chains contained in the anon_vma struct it points to. */ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; if (locked) { anon_vma = folio_anon_vma(folio); /* anon_vma disappear under us? 
*/ VM_BUG_ON_FOLIO(!anon_vma, folio); } else { anon_vma = rmap_walk_anon_lock(folio, rwc); } if (!anon_vma) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(vma, pgoff_start, folio_nr_pages(folio)); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) break; if (rwc->done && rwc->done(folio)) break; } if (!locked) anon_vma_unlock_read(anon_vma); } /* * rmap_walk_file - do something to file page using the object-based rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma chains * contained in the address_space struct it points to. */ static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct address_space *mapping = folio_mapping(folio); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; /* * The page lock not only makes sure that page->mapping cannot * suddenly be NULLified by truncation, it makes sure that the * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_rwsem. */ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!mapping) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; if (rwc->try_lock) { rwc->contended = true; return; } i_mmap_lock_read(mapping); } lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(vma, pgoff_start, folio_nr_pages(folio)); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) goto done; if (rwc->done && rwc->done(folio)) goto done; } done: if (!locked) i_mmap_unlock_read(mapping); } void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) rmap_walk_ksm(folio, rwc); else if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, false); else rmap_walk_file(folio, rwc, false); } /* Like rmap_walk, but caller holds relevant rmap lock */ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, true); else rmap_walk_file(folio, rwc, true); } #ifdef CONFIG_HUGETLB_PAGE /* * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. 
 */
void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
		unsigned long address, rmap_t flags)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

	atomic_inc(&folio->_entire_mapcount);
	atomic_inc(&folio->_large_mapcount);
	if (flags & RMAP_EXCLUSIVE)
		SetPageAnonExclusive(&folio->page);
	VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
			 PageAnonExclusive(&folio->page), folio);
}

void hugetlb_add_new_anon_rmap(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	/* increment count (starts at -1) */
	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_large_mapcount, 0);
	folio_clear_hugetlb_restore_reserve(folio);
	__folio_set_anon(folio, vma, address, true);
	SetPageAnonExclusive(&folio->page);
}
#endif /* CONFIG_HUGETLB_PAGE */
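
/*
 * Illustrative sketch, not part of rmap.c: how a reclaim-style caller is
 * expected to drive try_to_unmap().  The helper name
 * example_unmap_locked_folio() is hypothetical; per the try_to_unmap()
 * kerneldoc above, the caller holds the folio lock and passes TTU_SYNC so
 * that the folio_mapped() check afterwards does not race with a concurrent
 * unmap that skipped the page table lock.
 */
static bool example_unmap_locked_folio(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);

	/* try_to_unmap() returns void; success is judged via the mapcount. */
	try_to_unmap(folio, TTU_SYNC);

	return !folio_mapped(folio);
}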
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
*/ #include <linux/fs.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/file.h> #include <linux/xattr.h> #include <linux/rbtree.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/ratelimit.h> #include "overlayfs.h" struct ovl_cache_entry { unsigned int len; unsigned int type; u64 real_ino; u64 ino; struct list_head l_node; struct rb_node node; struct ovl_cache_entry *next_maybe_whiteout; bool is_upper; bool is_whiteout; bool check_xwhiteout; char name[]; }; struct ovl_dir_cache { long refcount; u64 version; struct list_head entries; struct rb_root root; }; struct ovl_readdir_data { struct dir_context ctx; struct dentry *dentry; bool is_lowest; struct rb_root *root; struct list_head *list; struct list_head middle; struct ovl_cache_entry *first_maybe_whiteout; int count; int err; bool is_upper; bool d_type_supported; bool in_xwhiteouts_dir; }; struct ovl_dir_file { bool is_real; bool is_upper; struct ovl_dir_cache *cache; struct list_head *cursor; struct file *realfile; struct file *upperfile; }; static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n) { return rb_entry(n, struct ovl_cache_entry, node); } static bool ovl_cache_entry_find_link(const char *name, int len, struct rb_node ***link, struct rb_node **parent) { bool found = false; struct rb_node **newp = *link; while (!found && *newp) { int cmp; struct ovl_cache_entry *tmp; *parent = *newp; tmp = ovl_cache_entry_from_node(*newp); cmp = strncmp(name, tmp->name, len); if (cmp > 0) newp = &tmp->node.rb_right; else if (cmp < 0 || len < tmp->len) newp = &tmp->node.rb_left; else found = true; } *link = newp; return found; } static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, const char *name, int len) { struct rb_node *node = root->rb_node; int cmp; while (node) { struct ovl_cache_entry *p = ovl_cache_entry_from_node(node); cmp = strncmp(name, p->name, len); if (cmp > 0) node = p->node.rb_right; else if (cmp < 0 || len < p->len) node = p->node.rb_left; else return p; } return NULL; } static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd, struct ovl_cache_entry *p) { /* Don't care if not doing ovl_iter() */ if (!rdd->dentry) return false; /* Always recalc d_ino when remapping lower inode numbers */ if (ovl_xino_bits(OVL_FS(rdd->dentry->d_sb))) return true; /* Always recalc d_ino for parent */ if (strcmp(p->name, "..") == 0) return true; /* If this is lower, then native d_ino will do */ if (!rdd->is_upper) return false; /* * Recalc d_ino for '.' and for all entries if dir is impure (contains * copied up entries) */ if ((p->name[0] == '.' 
&& p->len == 1) || ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry))) return true; return false; } static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd, const char *name, int len, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); p = kmalloc(size, GFP_KERNEL); if (!p) return NULL; memcpy(p->name, name, len); p->name[len] = '\0'; p->len = len; p->type = d_type; p->real_ino = ino; p->ino = ino; /* Defer setting d_ino for upper entry to ovl_iterate() */ if (ovl_calc_d_ino(rdd, p)) p->ino = 0; p->is_upper = rdd->is_upper; p->is_whiteout = false; /* Defer check for overlay.whiteout to ovl_iterate() */ p->check_xwhiteout = rdd->in_xwhiteouts_dir && d_type == DT_REG; if (d_type == DT_CHR) { p->next_maybe_whiteout = rdd->first_maybe_whiteout; rdd->first_maybe_whiteout = p; } return p; } static bool ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, const char *name, int len, u64 ino, unsigned int d_type) { struct rb_node **newp = &rdd->root->rb_node; struct rb_node *parent = NULL; struct ovl_cache_entry *p; if (ovl_cache_entry_find_link(name, len, &newp, &parent)) return true; p = ovl_cache_entry_new(rdd, name, len, ino, d_type); if (p == NULL) { rdd->err = -ENOMEM; return false; } list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, rdd->root); return true; } static bool ovl_fill_lowest(struct ovl_readdir_data *rdd, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; p = ovl_cache_entry_find(rdd->root, name, namelen); if (p) { list_move_tail(&p->l_node, &rdd->middle); } else { p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type); if (p == NULL) rdd->err = -ENOMEM; else list_add_tail(&p->l_node, &rdd->middle); } return rdd->err == 0; } void ovl_cache_free(struct list_head *list) { struct ovl_cache_entry *p; struct ovl_cache_entry *n; list_for_each_entry_safe(p, n, list, l_node) kfree(p); INIT_LIST_HEAD(list); } void ovl_dir_cache_free(struct inode *inode) { struct ovl_dir_cache *cache = ovl_dir_cache(inode); if (cache) { ovl_cache_free(&cache->entries); kfree(cache); } } static void ovl_cache_put(struct ovl_dir_file *od, struct inode *inode) { struct ovl_dir_cache *cache = od->cache; WARN_ON(cache->refcount <= 0); cache->refcount--; if (!cache->refcount) { if (ovl_dir_cache(inode) == cache) ovl_set_dir_cache(inode, NULL); ovl_cache_free(&cache->entries); kfree(cache); } } static bool ovl_fill_merge(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); rdd->count++; if (!rdd->is_lowest) return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type); else return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type); } static int ovl_check_whiteouts(const struct path *path, struct ovl_readdir_data *rdd) { int err; struct ovl_cache_entry *p; struct dentry *dentry, *dir = path->dentry; const struct cred *old_cred; old_cred = ovl_override_creds(rdd->dentry->d_sb); err = down_write_killable(&dir->d_inode->i_rwsem); if (!err) { while (rdd->first_maybe_whiteout) { p = rdd->first_maybe_whiteout; rdd->first_maybe_whiteout = p->next_maybe_whiteout; dentry = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); if (!IS_ERR(dentry)) { p->is_whiteout = ovl_is_whiteout(dentry); dput(dentry); } } inode_unlock(dir->d_inode); } ovl_revert_creds(old_cred); return err; } static inline 
int ovl_dir_read(const struct path *realpath, struct ovl_readdir_data *rdd) { struct file *realfile; int err; realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE); if (IS_ERR(realfile)) return PTR_ERR(realfile); rdd->first_maybe_whiteout = NULL; rdd->ctx.pos = 0; do { rdd->count = 0; rdd->err = 0; err = iterate_dir(realfile, &rdd->ctx); if (err >= 0) err = rdd->err; } while (!err && rdd->count); if (!err && rdd->first_maybe_whiteout && rdd->dentry) err = ovl_check_whiteouts(realpath, rdd); fput(realfile); return err; } static void ovl_dir_reset(struct file *file) { struct ovl_dir_file *od = file->private_data; struct ovl_dir_cache *cache = od->cache; struct inode *inode = file_inode(file); bool is_real; if (cache && ovl_inode_version_get(inode) != cache->version) { ovl_cache_put(od, inode); od->cache = NULL; od->cursor = NULL; } is_real = ovl_dir_is_real(inode); if (od->is_real != is_real) { /* is_real can only become false when dir is copied up */ if (WARN_ON(is_real)) return; od->is_real = false; } } static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list, struct rb_root *root) { int err; struct path realpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .dentry = dentry, .list = list, .root = root, .is_lowest = false, }; int idx, next; const struct ovl_layer *layer; for (idx = 0; idx != -1; idx = next) { next = ovl_path_next(idx, dentry, &realpath, &layer); rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry; rdd.in_xwhiteouts_dir = layer->has_xwhiteouts && ovl_dentry_has_xwhiteouts(dentry); if (next != -1) { err = ovl_dir_read(&realpath, &rdd); if (err) break; } else { /* * Insert lowest layer entries before upper ones, this * allows offsets to be reasonably constant */ list_add(&rdd.middle, rdd.list); rdd.is_lowest = true; err = ovl_dir_read(&realpath, &rdd); list_del(&rdd.middle); } } return err; } static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) { struct list_head *p; loff_t off = 0; list_for_each(p, &od->cache->entries) { if (off >= pos) break; off++; } /* Cursor is safe since the cache is stable */ od->cursor = p; } static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) { int res; struct ovl_dir_cache *cache; struct inode *inode = d_inode(dentry); cache = ovl_dir_cache(inode); if (cache && ovl_inode_version_get(inode) == cache->version) { WARN_ON(!cache->refcount); cache->refcount++; return cache; } ovl_set_dir_cache(d_inode(dentry), NULL); cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); if (!cache) return ERR_PTR(-ENOMEM); cache->refcount = 1; INIT_LIST_HEAD(&cache->entries); cache->root = RB_ROOT; res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root); if (res) { ovl_cache_free(&cache->entries); kfree(cache); return ERR_PTR(res); } cache->version = ovl_inode_version_get(inode); ovl_set_dir_cache(inode, cache); return cache; } /* Map inode number to lower fs unique range */ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid, const char *name, int namelen, bool warn) { unsigned int xinoshift = 64 - xinobits; if (unlikely(ino >> xinoshift)) { if (warn) { pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n", namelen, name, ino, xinobits); } return ino; } /* * The lowest xinobit is reserved for mapping the non-peresistent inode * numbers range, but this range is only exposed via st_ino, not here. */ return ino | ((u64)fsid) << (xinoshift + 1); } /* * Set d_ino for upper entries if needed. 
Non-upper entries should always report * the uppermost real inode ino and should not call this function. * * When not all layer are on same fs, report real ino also for upper. * * When all layers are on the same fs, and upper has a reference to * copy up origin, call vfs_getattr() on the overlay entry to make * sure that d_ino will be consistent with st_ino from stat(2). * * Also checks the overlay.whiteout xattr by doing a full lookup which will return * negative in this case. */ static int ovl_cache_update(const struct path *path, struct ovl_cache_entry *p, bool update_ino) { struct dentry *dir = path->dentry; struct ovl_fs *ofs = OVL_FS(dir->d_sb); struct dentry *this = NULL; enum ovl_path_type type; u64 ino = p->real_ino; int xinobits = ovl_xino_bits(ofs); int err = 0; if (!ovl_same_dev(ofs) && !p->check_xwhiteout) goto out; if (p->name[0] == '.') { if (p->len == 1) { this = dget(dir); goto get; } if (p->len == 2 && p->name[1] == '.') { /* we shall not be moved */ this = dget(dir->d_parent); goto get; } } /* This checks also for xwhiteouts */ this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len); if (IS_ERR_OR_NULL(this) || !this->d_inode) { /* Mark a stale entry */ p->is_whiteout = true; if (IS_ERR(this)) { err = PTR_ERR(this); this = NULL; goto fail; } goto out; } get: if (!ovl_same_dev(ofs) || !update_ino) goto out; type = ovl_path_type(this); if (OVL_TYPE_ORIGIN(type)) { struct kstat stat; struct path statpath = *path; statpath.dentry = this; err = vfs_getattr(&statpath, &stat, STATX_INO, 0); if (err) goto fail; /* * Directory inode is always on overlay st_dev. * Non-dir with ovl_same_dev() could be on pseudo st_dev in case * of xino bits overflow. */ WARN_ON_ONCE(S_ISDIR(stat.mode) && dir->d_sb->s_dev != stat.dev); ino = stat.ino; } else if (xinobits && !OVL_TYPE_UPPER(type)) { ino = ovl_remap_lower_ino(ino, xinobits, ovl_layer_lower(this)->fsid, p->name, p->len, ovl_xino_warn(ofs)); } out: p->ino = ino; dput(this); return err; fail: pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n", p->name, err); goto out; } static bool ovl_fill_plain(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); rdd->count++; p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type); if (p == NULL) { rdd->err = -ENOMEM; return false; } list_add_tail(&p->l_node, rdd->list); return true; } static int ovl_dir_read_impure(const struct path *path, struct list_head *list, struct rb_root *root) { int err; struct path realpath; struct ovl_cache_entry *p, *n; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = list, .root = root, }; INIT_LIST_HEAD(list); *root = RB_ROOT; ovl_path_upper(path->dentry, &realpath); err = ovl_dir_read(&realpath, &rdd); if (err) return err; list_for_each_entry_safe(p, n, list, l_node) { if (strcmp(p->name, ".") != 0 && strcmp(p->name, "..") != 0) { err = ovl_cache_update(path, p, true); if (err) return err; } if (p->ino == p->real_ino) { list_del(&p->l_node); kfree(p); } else { struct rb_node **newp = &root->rb_node; struct rb_node *parent = NULL; if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len, &newp, &parent))) return -EIO; rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, root); } } return 0; } static struct ovl_dir_cache *ovl_cache_get_impure(const struct path *path) { int res; struct dentry *dentry = path->dentry; struct inode *inode = d_inode(dentry); struct ovl_fs *ofs = 
OVL_FS(dentry->d_sb); struct ovl_dir_cache *cache; cache = ovl_dir_cache(inode); if (cache && ovl_inode_version_get(inode) == cache->version) return cache; /* Impure cache is not refcounted, free it here */ ovl_dir_cache_free(inode); ovl_set_dir_cache(inode, NULL); cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL); if (!cache) return ERR_PTR(-ENOMEM); res = ovl_dir_read_impure(path, &cache->entries, &cache->root); if (res) { ovl_cache_free(&cache->entries); kfree(cache); return ERR_PTR(res); } if (list_empty(&cache->entries)) { /* * A good opportunity to get rid of an unneeded "impure" flag. * Removing the "impure" xattr is best effort. */ if (!ovl_want_write(dentry)) { ovl_removexattr(ofs, ovl_dentry_upper(dentry), OVL_XATTR_IMPURE); ovl_drop_write(dentry); } ovl_clear_flag(OVL_IMPURE, inode); kfree(cache); return NULL; } cache->version = ovl_inode_version_get(inode); ovl_set_dir_cache(inode, cache); return cache; } struct ovl_readdir_translate { struct dir_context *orig_ctx; struct ovl_dir_cache *cache; struct dir_context ctx; u64 parent_ino; int fsid; int xinobits; bool xinowarn; }; static bool ovl_fill_real(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_translate *rdt = container_of(ctx, struct ovl_readdir_translate, ctx); struct dir_context *orig_ctx = rdt->orig_ctx; if (rdt->parent_ino && strcmp(name, "..") == 0) { ino = rdt->parent_ino; } else if (rdt->cache) { struct ovl_cache_entry *p; p = ovl_cache_entry_find(&rdt->cache->root, name, namelen); if (p) ino = p->ino; } else if (rdt->xinobits) { ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid, name, namelen, rdt->xinowarn); } return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); } static bool ovl_is_impure_dir(struct file *file) { struct ovl_dir_file *od = file->private_data; struct inode *dir = file_inode(file); /* * Only upper dir can be impure, but if we are in the middle of * iterating a lower real dir, dir could be copied up and marked * impure. We only want the impure cache if we started iterating * a real upper dir to begin with. 
*/ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir); } static int ovl_iterate_real(struct file *file, struct dir_context *ctx) { int err; struct ovl_dir_file *od = file->private_data; struct dentry *dir = file->f_path.dentry; struct ovl_fs *ofs = OVL_FS(dir->d_sb); const struct ovl_layer *lower_layer = ovl_layer_lower(dir); struct ovl_readdir_translate rdt = { .ctx.actor = ovl_fill_real, .orig_ctx = ctx, .xinobits = ovl_xino_bits(ofs), .xinowarn = ovl_xino_warn(ofs), }; if (rdt.xinobits && lower_layer) rdt.fsid = lower_layer->fsid; if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) { struct kstat stat; struct path statpath = file->f_path; statpath.dentry = dir->d_parent; err = vfs_getattr(&statpath, &stat, STATX_INO, 0); if (err) return err; WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev); rdt.parent_ino = stat.ino; } if (ovl_is_impure_dir(file)) { rdt.cache = ovl_cache_get_impure(&file->f_path); if (IS_ERR(rdt.cache)) return PTR_ERR(rdt.cache); } err = iterate_dir(od->realfile, &rdt.ctx); ctx->pos = rdt.ctx.pos; return err; } static int ovl_iterate(struct file *file, struct dir_context *ctx) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; struct ovl_fs *ofs = OVL_FS(dentry->d_sb); struct ovl_cache_entry *p; const struct cred *old_cred; int err; old_cred = ovl_override_creds(dentry->d_sb); if (!ctx->pos) ovl_dir_reset(file); if (od->is_real) { /* * If parent is merge, then need to adjust d_ino for '..', if * dir is impure then need to adjust d_ino for copied up * entries. */ if (ovl_xino_bits(ofs) || (ovl_same_fs(ofs) && (ovl_is_impure_dir(file) || OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) { err = ovl_iterate_real(file, ctx); } else { err = iterate_dir(od->realfile, ctx); } goto out; } if (!od->cache) { struct ovl_dir_cache *cache; cache = ovl_cache_get(dentry); err = PTR_ERR(cache); if (IS_ERR(cache)) goto out; od->cache = cache; ovl_seek_cursor(od, ctx->pos); } while (od->cursor != &od->cache->entries) { p = list_entry(od->cursor, struct ovl_cache_entry, l_node); if (!p->is_whiteout) { if (!p->ino || p->check_xwhiteout) { err = ovl_cache_update(&file->f_path, p, !p->ino); if (err) goto out; } } /* ovl_cache_update() sets is_whiteout on stale entry */ if (!p->is_whiteout) { if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) break; } od->cursor = p->l_node.next; ctx->pos++; } err = 0; out: ovl_revert_creds(old_cred); return err; } static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin) { loff_t res; struct ovl_dir_file *od = file->private_data; inode_lock(file_inode(file)); if (!file->f_pos) ovl_dir_reset(file); if (od->is_real) { res = vfs_llseek(od->realfile, offset, origin); file->f_pos = od->realfile->f_pos; } else { res = -EINVAL; switch (origin) { case SEEK_CUR: offset += file->f_pos; break; case SEEK_SET: break; default: goto out_unlock; } if (offset < 0) goto out_unlock; if (offset != file->f_pos) { file->f_pos = offset; if (od->cache) ovl_seek_cursor(od, offset); } res = offset; } out_unlock: inode_unlock(file_inode(file)); return res; } static struct file *ovl_dir_open_realfile(const struct file *file, const struct path *realpath) { struct file *res; const struct cred *old_cred; old_cred = ovl_override_creds(file_inode(file)->i_sb); res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE)); ovl_revert_creds(old_cred); return res; } /* * Like ovl_real_fdget(), returns upperfile if dir was copied up since open. * Unlike ovl_real_fdget(), this caches upperfile in file->private_data. 
* * TODO: use same abstract type for file->private_data of dir and file so * upperfile could also be cached for files as well. */ struct file *ovl_dir_real_file(const struct file *file, bool want_upper) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; struct file *old, *realfile = od->realfile; if (!OVL_TYPE_UPPER(ovl_path_type(dentry))) return want_upper ? NULL : realfile; /* * Need to check if we started out being a lower dir, but got copied up */ if (!od->is_upper) { realfile = READ_ONCE(od->upperfile); if (!realfile) { struct path upperpath; ovl_path_upper(dentry, &upperpath); realfile = ovl_dir_open_realfile(file, &upperpath); if (IS_ERR(realfile)) return realfile; old = cmpxchg_release(&od->upperfile, NULL, realfile); if (old) { fput(realfile); realfile = old; } } } return realfile; } static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct file *realfile; int err; err = ovl_sync_status(OVL_FS(file_inode(file)->i_sb)); if (err <= 0) return err; realfile = ovl_dir_real_file(file, true); err = PTR_ERR_OR_ZERO(realfile); /* Nothing to sync for lower */ if (!realfile || err) return err; return vfs_fsync_range(realfile, start, end, datasync); } static int ovl_dir_release(struct inode *inode, struct file *file) { struct ovl_dir_file *od = file->private_data; if (od->cache) { inode_lock(inode); ovl_cache_put(od, inode); inode_unlock(inode); } fput(od->realfile); if (od->upperfile) fput(od->upperfile); kfree(od); return 0; } static int ovl_dir_open(struct inode *inode, struct file *file) { struct path realpath; struct file *realfile; struct ovl_dir_file *od; enum ovl_path_type type; od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL); if (!od) return -ENOMEM; type = ovl_path_real(file->f_path.dentry, &realpath); realfile = ovl_dir_open_realfile(file, &realpath); if (IS_ERR(realfile)) { kfree(od); return PTR_ERR(realfile); } od->realfile = realfile; od->is_real = ovl_dir_is_real(inode); od->is_upper = OVL_TYPE_UPPER(type); file->private_data = od; return 0; } WRAP_DIR_ITER(ovl_iterate) // FIXME! const struct file_operations ovl_dir_operations = { .read = generic_read_dir, .open = ovl_dir_open, .iterate_shared = shared_ovl_iterate, .llseek = ovl_dir_llseek, .fsync = ovl_dir_fsync, .release = ovl_dir_release, }; int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) { int err; struct ovl_cache_entry *p, *n; struct rb_root root = RB_ROOT; const struct cred *old_cred; old_cred = ovl_override_creds(dentry->d_sb); err = ovl_dir_read_merged(dentry, list, &root); ovl_revert_creds(old_cred); if (err) return err; err = 0; list_for_each_entry_safe(p, n, list, l_node) { /* * Select whiteouts in upperdir, they should * be cleared when deleting this directory. 
*/ if (p->is_whiteout) { if (p->is_upper) continue; goto del_entry; } if (p->name[0] == '.') { if (p->len == 1) goto del_entry; if (p->len == 2 && p->name[1] == '.') goto del_entry; } err = -ENOTEMPTY; break; del_entry: list_del(&p->l_node); kfree(p); } return err; } void ovl_cleanup_whiteouts(struct ovl_fs *ofs, struct dentry *upper, struct list_head *list) { struct ovl_cache_entry *p; inode_lock_nested(upper->d_inode, I_MUTEX_CHILD); list_for_each_entry(p, list, l_node) { struct dentry *dentry; if (WARN_ON(!p->is_whiteout || !p->is_upper)) continue; dentry = ovl_lookup_upper(ofs, p->name, upper, p->len); if (IS_ERR(dentry)) { pr_err("lookup '%s/%.*s' failed (%i)\n", upper->d_name.name, p->len, p->name, (int) PTR_ERR(dentry)); continue; } if (dentry->d_inode) ovl_cleanup(ofs, upper->d_inode, dentry); dput(dentry); } inode_unlock(upper->d_inode); } static bool ovl_check_d_type(struct dir_context *ctx, const char *name, int namelen, loff_t offset, u64 ino, unsigned int d_type) { struct ovl_readdir_data *rdd = container_of(ctx, struct ovl_readdir_data, ctx); /* Even if d_type is not supported, DT_DIR is returned for . and .. */ if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen)) return true; if (d_type != DT_UNKNOWN) rdd->d_type_supported = true; return true; } /* * Returns 1 if d_type is supported, 0 not supported/unknown. Negative values * if error is encountered. */ int ovl_check_d_type_supported(const struct path *realpath) { int err; struct ovl_readdir_data rdd = { .ctx.actor = ovl_check_d_type, .d_type_supported = false, }; err = ovl_dir_read(realpath, &rdd); if (err) return err; return rdd.d_type_supported; } #define OVL_INCOMPATDIR_NAME "incompat" static int ovl_workdir_cleanup_recurse(struct ovl_fs *ofs, const struct path *path, int level) { int err; struct inode *dir = path->dentry->d_inode; LIST_HEAD(list); struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = &list, }; bool incompat = false; /* * The "work/incompat" directory is treated specially - if it is not * empty, instead of printing a generic error and mounting read-only, * we will error about incompat features and fail the mount. * * When called from ovl_indexdir_cleanup(), path->dentry->d_name.name * starts with '#'. 
*/ if (level == 2 && !strcmp(path->dentry->d_name.name, OVL_INCOMPATDIR_NAME)) incompat = true; err = ovl_dir_read(path, &rdd); if (err) goto out; inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { struct dentry *dentry; if (p->name[0] == '.') { if (p->len == 1) continue; if (p->len == 2 && p->name[1] == '.') continue; } else if (incompat) { pr_err("overlay with incompat feature '%s' cannot be mounted\n", p->name); err = -EINVAL; break; } dentry = ovl_lookup_upper(ofs, p->name, path->dentry, p->len); if (IS_ERR(dentry)) continue; if (dentry->d_inode) err = ovl_workdir_cleanup(ofs, dir, path->mnt, dentry, level); dput(dentry); if (err) break; } inode_unlock(dir); out: ovl_cache_free(&list); return err; } int ovl_workdir_cleanup(struct ovl_fs *ofs, struct inode *dir, struct vfsmount *mnt, struct dentry *dentry, int level) { int err; if (!d_is_dir(dentry) || level > 1) { return ovl_cleanup(ofs, dir, dentry); } err = ovl_do_rmdir(ofs, dir, dentry); if (err) { struct path path = { .mnt = mnt, .dentry = dentry }; inode_unlock(dir); err = ovl_workdir_cleanup_recurse(ofs, &path, level + 1); inode_lock_nested(dir, I_MUTEX_PARENT); if (!err) err = ovl_cleanup(ofs, dir, dentry); } return err; } int ovl_indexdir_cleanup(struct ovl_fs *ofs) { int err; struct dentry *indexdir = ofs->workdir; struct dentry *index = NULL; struct inode *dir = indexdir->d_inode; struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir }; LIST_HEAD(list); struct ovl_cache_entry *p; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_plain, .list = &list, }; err = ovl_dir_read(&path, &rdd); if (err) goto out; inode_lock_nested(dir, I_MUTEX_PARENT); list_for_each_entry(p, &list, l_node) { if (p->name[0] == '.') { if (p->len == 1) continue; if (p->len == 2 && p->name[1] == '.') continue; } index = ovl_lookup_upper(ofs, p->name, indexdir, p->len); if (IS_ERR(index)) { err = PTR_ERR(index); index = NULL; break; } /* Cleanup leftover from index create/cleanup attempt */ if (index->d_name.name[0] == '#') { err = ovl_workdir_cleanup(ofs, dir, path.mnt, index, 1); if (err) break; goto next; } err = ovl_verify_index(ofs, index); if (!err) { goto next; } else if (err == -ESTALE) { /* Cleanup stale index entries */ err = ovl_cleanup(ofs, dir, index); } else if (err != -ENOENT) { /* * Abort mount to avoid corrupting the index if * an incompatible index entry was found or on out * of memory. */ break; } else if (ofs->config.nfs_export) { /* * Whiteout orphan index to block future open by * handle after overlay nlink dropped to zero. */ err = ovl_cleanup_and_whiteout(ofs, dir, index); } else { /* Cleanup orphan index entries */ err = ovl_cleanup(ofs, dir, index); } if (err) break; next: dput(index); index = NULL; } dput(index); inode_unlock(dir); out: ovl_cache_free(&list); if (err) pr_err("failed index dir cleanup (%i)\n", err); return err; }
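The upperfile caching in ovl_dir_real_file() above is a lock-free publish-once pattern: any racer may open its own upper file, but only the first cmpxchg_release() publishes it; losers fput() their copy and use the winner's. Below is a minimal user-space sketch of the same idiom using C11 atomics; all names (get_cached, make_object, the object struct) are illustrative and not kernel APIs.

/* Publish-once lazy initialization: racers may each build an object, but
 * only the first compare-exchange wins; losers drop their copy and reuse
 * the winner's pointer. Sketch only -- not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object { int id; };

static struct object *make_object(void)
{
	struct object *o = malloc(sizeof(*o));

	if (o)
		o->id = 42;
	return o;
}

static struct object *get_cached(_Atomic(struct object *) *slot)
{
	/* Fast path: already published (acquire pairs with the CAS below). */
	struct object *obj = atomic_load_explicit(slot, memory_order_acquire);
	struct object *expected = NULL;

	if (obj)
		return obj;

	obj = make_object();
	if (!obj)
		return NULL;

	/* Try to publish; on failure "expected" holds the winner's pointer. */
	if (!atomic_compare_exchange_strong_explicit(slot, &expected, obj,
						     memory_order_acq_rel,
						     memory_order_acquire)) {
		free(obj);		/* like fput() on the losing file */
		obj = expected;
	}
	return obj;
}

int main(void)
{
	static _Atomic(struct object *) slot;
	struct object *o = get_cached(&slot);

	printf("cached object id: %d\n", o ? o->id : -1);
	free(o);
	return 0;
}

The release/acquire pairing guarantees that a thread which observes the published pointer also observes the fully initialized object, which is what the READ_ONCE()/cmpxchg_release() pair provides in the kernel version.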
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 // SPDX-License-Identifier: GPL-2.0 /* Fintek F81604 USB-to-2CAN controller driver. * * Copyright (C) 2023 Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw> */ #include <linux/bitfield.h> #include <linux/netdevice.h> #include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/platform/sja1000.h> #include <linux/unaligned.h> /* vendor and product id */ #define F81604_VENDOR_ID 0x2c42 #define F81604_PRODUCT_ID 0x1709 #define F81604_CAN_CLOCK (12 * MEGA) #define F81604_MAX_DEV 2 #define F81604_SET_DEVICE_RETRY 10 #define F81604_USB_TIMEOUT 2000 #define F81604_SET_GET_REGISTER 0xA0 #define F81604_PORT_OFFSET 0x1000 #define F81604_MAX_RX_URBS 4 #define F81604_CMD_DATA 0x00 #define F81604_DLC_LEN_MASK GENMASK(3, 0) #define F81604_DLC_EFF_BIT BIT(7) #define F81604_DLC_RTR_BIT BIT(6) #define F81604_SFF_SHIFT 5 #define F81604_EFF_SHIFT 3 #define F81604_BRP_MASK GENMASK(5, 0) #define F81604_SJW_MASK GENMASK(7, 6) #define F81604_SEG1_MASK GENMASK(3, 0) #define F81604_SEG2_MASK GENMASK(6, 4) #define F81604_CLEAR_ALC 0 #define F81604_CLEAR_ECC 1 #define F81604_CLEAR_OVERRUN 2 /* device setting */ #define F81604_CTRL_MODE_REG 0x80 #define F81604_TX_ONESHOT (0x03 << 3) #define F81604_TX_NORMAL (0x01 << 3) #define F81604_RX_AUTO_RELEASE_BUF BIT(1) #define F81604_INT_WHEN_CHANGE BIT(0) #define F81604_TERMINATOR_REG 0x105 #define F81604_CAN0_TERM BIT(2) #define F81604_CAN1_TERM BIT(3) #define F81604_TERMINATION_DISABLED CAN_TERMINATION_DISABLED #define F81604_TERMINATION_ENABLED 120 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ #define F81604_SJA1000_MOD 0x00 #define F81604_SJA1000_CMR 0x01 #define F81604_SJA1000_IR 0x03 #define F81604_SJA1000_IER 0x04 #define F81604_SJA1000_ALC 0x0B #define F81604_SJA1000_ECC 0x0C #define F81604_SJA1000_RXERR 0x0E #define F81604_SJA1000_TXERR 0x0F #define F81604_SJA1000_ACCC0 0x10 #define F81604_SJA1000_ACCM0 0x14 #define F81604_MAX_FILTER_CNT 4 /* Common registers - manual section 6.5 */ #define F81604_SJA1000_BTR0 0x06 #define F81604_SJA1000_BTR1 0x07 #define F81604_SJA1000_BTR1_SAMPLE_TRIPLE BIT(7) 
#define F81604_SJA1000_OCR 0x08 #define F81604_SJA1000_CDR 0x1F /* mode register */ #define F81604_SJA1000_MOD_RM 0x01 #define F81604_SJA1000_MOD_LOM 0x02 #define F81604_SJA1000_MOD_STM 0x04 /* commands */ #define F81604_SJA1000_CMD_CDO 0x08 /* interrupt sources */ #define F81604_SJA1000_IRQ_BEI 0x80 #define F81604_SJA1000_IRQ_ALI 0x40 #define F81604_SJA1000_IRQ_EPI 0x20 #define F81604_SJA1000_IRQ_DOI 0x08 #define F81604_SJA1000_IRQ_EI 0x04 #define F81604_SJA1000_IRQ_TI 0x02 #define F81604_SJA1000_IRQ_RI 0x01 #define F81604_SJA1000_IRQ_ALL 0xFF #define F81604_SJA1000_IRQ_OFF 0x00 /* status register content */ #define F81604_SJA1000_SR_BS 0x80 #define F81604_SJA1000_SR_ES 0x40 #define F81604_SJA1000_SR_TCS 0x08 /* ECC register */ #define F81604_SJA1000_ECC_SEG 0x1F #define F81604_SJA1000_ECC_DIR 0x20 #define F81604_SJA1000_ECC_BIT 0x00 #define F81604_SJA1000_ECC_FORM 0x40 #define F81604_SJA1000_ECC_STUFF 0x80 #define F81604_SJA1000_ECC_MASK 0xc0 /* ALC register */ #define F81604_SJA1000_ALC_MASK 0x1f /* table of devices that work with this driver */ static const struct usb_device_id f81604_table[] = { { USB_DEVICE(F81604_VENDOR_ID, F81604_PRODUCT_ID) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, f81604_table); static const struct ethtool_ops f81604_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const u16 f81604_termination[] = { F81604_TERMINATION_DISABLED, F81604_TERMINATION_ENABLED }; struct f81604_priv { struct net_device *netdev[F81604_MAX_DEV]; }; struct f81604_port_priv { struct can_priv can; struct net_device *netdev; struct sk_buff *echo_skb; unsigned long clear_flags; struct work_struct clear_reg_work; struct usb_device *dev; struct usb_interface *intf; struct usb_anchor urbs_anchor; }; /* Interrupt endpoint data format: * Byte 0: Status register. * Byte 1: Interrupt register. * Byte 2: Interrupt enable register. * Byte 3: Arbitration lost capture(ALC) register. * Byte 4: Error code capture(ECC) register. * Byte 5: Error warning limit register. * Byte 6: RX error counter register. * Byte 7: TX error counter register. * Byte 8: Reserved. */ struct f81604_int_data { u8 sr; u8 isrc; u8 ier; u8 alc; u8 ecc; u8 ewlr; u8 rxerr; u8 txerr; u8 val; } __packed __aligned(4); struct f81604_sff { __be16 id; u8 data[CAN_MAX_DLEN]; } __packed __aligned(2); struct f81604_eff { __be32 id; u8 data[CAN_MAX_DLEN]; } __packed __aligned(2); struct f81604_can_frame { u8 cmd; /* According for F81604 DLC define: * bit 3~0: data length (0~8) * bit6: is RTR flag. * bit7: is EFF frame. 
*/ u8 dlc; union { struct f81604_sff sff; struct f81604_eff eff; }; } __packed __aligned(2); static const u8 bulk_in_addr[F81604_MAX_DEV] = { 2, 4 }; static const u8 bulk_out_addr[F81604_MAX_DEV] = { 1, 3 }; static const u8 int_in_addr[F81604_MAX_DEV] = { 1, 3 }; static int f81604_write(struct usb_device *dev, u16 reg, u8 data) { int ret; ret = usb_control_msg_send(dev, 0, F81604_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_OUT, 0, reg, &data, sizeof(data), F81604_USB_TIMEOUT, GFP_KERNEL); if (ret) dev_err(&dev->dev, "%s: reg: %x data: %x failed: %pe\n", __func__, reg, data, ERR_PTR(ret)); return ret; } static int f81604_read(struct usb_device *dev, u16 reg, u8 *data) { int ret; ret = usb_control_msg_recv(dev, 0, F81604_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_IN, 0, reg, data, sizeof(*data), F81604_USB_TIMEOUT, GFP_KERNEL); if (ret < 0) dev_err(&dev->dev, "%s: reg: %x failed: %pe\n", __func__, reg, ERR_PTR(ret)); return ret; } static int f81604_update_bits(struct usb_device *dev, u16 reg, u8 mask, u8 data) { int ret; u8 tmp; ret = f81604_read(dev, reg, &tmp); if (ret) return ret; tmp &= ~mask; tmp |= (mask & data); return f81604_write(dev, reg, tmp); } static int f81604_sja1000_write(struct f81604_port_priv *priv, u16 reg, u8 data) { int port = priv->netdev->dev_port; int real_reg; real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; return f81604_write(priv->dev, real_reg, data); } static int f81604_sja1000_read(struct f81604_port_priv *priv, u16 reg, u8 *data) { int port = priv->netdev->dev_port; int real_reg; real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; return f81604_read(priv->dev, real_reg, data); } static int f81604_set_reset_mode(struct f81604_port_priv *priv) { int ret, i; u8 tmp; /* disable interrupts */ ret = f81604_sja1000_write(priv, F81604_SJA1000_IER, F81604_SJA1000_IRQ_OFF); if (ret) return ret; for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); if (ret) return ret; /* check reset bit */ if (tmp & F81604_SJA1000_MOD_RM) { priv->can.state = CAN_STATE_STOPPED; return 0; } /* reset chip */ ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, F81604_SJA1000_MOD_RM); if (ret) return ret; } return -EPERM; } static int f81604_set_normal_mode(struct f81604_port_priv *priv) { u8 tmp, ier = 0; u8 mod_reg = 0; int ret, i; for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); if (ret) return ret; /* check reset bit */ if ((tmp & F81604_SJA1000_MOD_RM) == 0) { priv->can.state = CAN_STATE_ERROR_ACTIVE; /* enable interrupts, RI handled by bulk-in */ ier = F81604_SJA1000_IRQ_ALL & ~F81604_SJA1000_IRQ_RI; if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) ier &= ~F81604_SJA1000_IRQ_BEI; return f81604_sja1000_write(priv, F81604_SJA1000_IER, ier); } /* set chip to normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mod_reg |= F81604_SJA1000_MOD_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) mod_reg |= F81604_SJA1000_MOD_STM; ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, mod_reg); if (ret) return ret; } return -EPERM; } static int f81604_chipset_init(struct f81604_port_priv *priv) { int i, ret; /* set clock divider and output control register */ ret = f81604_sja1000_write(priv, F81604_SJA1000_CDR, CDR_CBP | CDR_PELICAN); if (ret) return ret; /* set acceptance filter (accept all) */ for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCC0 + i, 0); if (ret) return ret; } for (i 
= 0; i < F81604_MAX_FILTER_CNT; ++i) { ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCM0 + i, 0xFF); if (ret) return ret; } return f81604_sja1000_write(priv, F81604_SJA1000_OCR, OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL | OCR_MODE_NORMAL); } static void f81604_process_rx_packet(struct net_device *netdev, struct f81604_can_frame *frame) { struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; if (frame->cmd != F81604_CMD_DATA) return; skb = alloc_can_skb(netdev, &cf); if (!skb) { stats->rx_dropped++; return; } cf->len = can_cc_dlc2len(frame->dlc & F81604_DLC_LEN_MASK); if (frame->dlc & F81604_DLC_EFF_BIT) { cf->can_id = get_unaligned_be32(&frame->eff.id) >> F81604_EFF_SHIFT; cf->can_id |= CAN_EFF_FLAG; if (!(frame->dlc & F81604_DLC_RTR_BIT)) memcpy(cf->data, frame->eff.data, cf->len); } else { cf->can_id = get_unaligned_be16(&frame->sff.id) >> F81604_SFF_SHIFT; if (!(frame->dlc & F81604_DLC_RTR_BIT)) memcpy(cf->data, frame->sff.data, cf->len); } if (frame->dlc & F81604_DLC_RTR_BIT) cf->can_id |= CAN_RTR_FLAG; else stats->rx_bytes += cf->len; stats->rx_packets++; netif_rx(skb); } static void f81604_read_bulk_callback(struct urb *urb) { struct f81604_can_frame *frame = urb->transfer_buffer; struct net_device *netdev = urb->context; int ret; if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: URB aborted %pe\n", __func__, ERR_PTR(urb->status)); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: goto resubmit_urb; } if (urb->actual_length != sizeof(*frame)) { netdev_warn(netdev, "URB length %u not equal to %zu\n", urb->actual_length, sizeof(*frame)); goto resubmit_urb; } f81604_process_rx_packet(netdev, frame); resubmit_urb: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == -ENODEV) netif_device_detach(netdev); else if (ret) netdev_err(netdev, "%s: failed to resubmit read bulk urb: %pe\n", __func__, ERR_PTR(ret)); } static void f81604_handle_tx(struct f81604_port_priv *priv, struct f81604_int_data *data) { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; /* transmission buffer released */ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && !(data->sr & F81604_SJA1000_SR_TCS)) { stats->tx_errors++; can_free_echo_skb(netdev, 0, NULL); } else { /* transmission complete */ stats->tx_bytes += can_get_echo_skb(netdev, 0, NULL); stats->tx_packets++; } netif_wake_queue(netdev); } static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, struct f81604_int_data *data) { enum can_state can_state = priv->can.state; struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; /* Note: ALC/ECC will not auto clear by read here, must be cleared by * read register (via clear_reg_work). 
*/ skb = alloc_can_err_skb(netdev, &cf); if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = data->txerr; cf->data[7] = data->rxerr; } if (data->isrc & F81604_SJA1000_IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(netdev, "data overrun interrupt\n"); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } stats->rx_over_errors++; stats->rx_errors++; set_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags); } if (data->isrc & F81604_SJA1000_IRQ_EI) { /* error warning interrupt */ netdev_dbg(netdev, "error warning interrupt\n"); if (data->sr & F81604_SJA1000_SR_BS) can_state = CAN_STATE_BUS_OFF; else if (data->sr & F81604_SJA1000_SR_ES) can_state = CAN_STATE_ERROR_WARNING; else can_state = CAN_STATE_ERROR_ACTIVE; } if (data->isrc & F81604_SJA1000_IRQ_BEI) { /* bus error interrupt */ netdev_dbg(netdev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; if (skb) { cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; /* set error type */ switch (data->ecc & F81604_SJA1000_ECC_MASK) { case F81604_SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case F81604_SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case F81604_SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: break; } /* set error location */ cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG; } /* Error occurred during transmission? */ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) { stats->tx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_TX; } else { stats->rx_errors++; } set_bit(F81604_CLEAR_ECC, &priv->clear_flags); } if (data->isrc & F81604_SJA1000_IRQ_EPI) { if (can_state == CAN_STATE_ERROR_PASSIVE) can_state = CAN_STATE_ERROR_WARNING; else can_state = CAN_STATE_ERROR_PASSIVE; /* error passive interrupt */ netdev_dbg(netdev, "error passive interrupt: %d\n", can_state); } if (data->isrc & F81604_SJA1000_IRQ_ALI) { /* arbitration lost interrupt */ netdev_dbg(netdev, "arbitration lost interrupt\n"); priv->can.can_stats.arbitration_lost++; if (skb) { cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = data->alc & F81604_SJA1000_ALC_MASK; } set_bit(F81604_CLEAR_ALC, &priv->clear_flags); } if (can_state != priv->can.state) { enum can_state tx_state, rx_state; tx_state = data->txerr >= data->rxerr ? can_state : 0; rx_state = data->txerr <= data->rxerr ? 
can_state : 0; can_change_state(netdev, cf, tx_state, rx_state); if (can_state == CAN_STATE_BUS_OFF) can_bus_off(netdev); } if (priv->clear_flags) schedule_work(&priv->clear_reg_work); if (skb) netif_rx(skb); } static void f81604_read_int_callback(struct urb *urb) { struct f81604_int_data *data = urb->transfer_buffer; struct net_device *netdev = urb->context; struct f81604_port_priv *priv; int ret; priv = netdev_priv(netdev); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: Int URB aborted: %pe\n", __func__, ERR_PTR(urb->status)); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: goto resubmit_urb; } /* handle Errors */ if (data->isrc & (F81604_SJA1000_IRQ_DOI | F81604_SJA1000_IRQ_EI | F81604_SJA1000_IRQ_BEI | F81604_SJA1000_IRQ_EPI | F81604_SJA1000_IRQ_ALI)) f81604_handle_can_bus_errors(priv, data); /* handle TX */ if (priv->can.state != CAN_STATE_BUS_OFF && (data->isrc & F81604_SJA1000_IRQ_TI)) f81604_handle_tx(priv, data); resubmit_urb: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == -ENODEV) netif_device_detach(netdev); else if (ret) netdev_err(netdev, "%s: failed to resubmit int urb: %pe\n", __func__, ERR_PTR(ret)); } static void f81604_unregister_urbs(struct f81604_port_priv *priv) { usb_kill_anchored_urbs(&priv->urbs_anchor); } static int f81604_register_urbs(struct f81604_port_priv *priv) { struct net_device *netdev = priv->netdev; struct f81604_int_data *int_data; int id = netdev->dev_port; struct urb *int_urb; int rx_urb_cnt; int ret; for (rx_urb_cnt = 0; rx_urb_cnt < F81604_MAX_RX_URBS; ++rx_urb_cnt) { struct f81604_can_frame *frame; struct urb *rx_urb; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) { ret = -ENOMEM; break; } frame = kmalloc(sizeof(*frame), GFP_KERNEL); if (!frame) { usb_free_urb(rx_urb); ret = -ENOMEM; break; } usb_fill_bulk_urb(rx_urb, priv->dev, usb_rcvbulkpipe(priv->dev, bulk_in_addr[id]), frame, sizeof(*frame), f81604_read_bulk_callback, netdev); rx_urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(rx_urb, &priv->urbs_anchor); ret = usb_submit_urb(rx_urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(rx_urb); usb_free_urb(rx_urb); break; } /* Drop reference, USB core will take care of freeing it */ usb_free_urb(rx_urb); } if (rx_urb_cnt == 0) { netdev_warn(netdev, "%s: submit rx urb failed: %pe\n", __func__, ERR_PTR(ret)); goto error; } int_urb = usb_alloc_urb(0, GFP_KERNEL); if (!int_urb) { ret = -ENOMEM; goto error; } int_data = kmalloc(sizeof(*int_data), GFP_KERNEL); if (!int_data) { usb_free_urb(int_urb); ret = -ENOMEM; goto error; } usb_fill_int_urb(int_urb, priv->dev, usb_rcvintpipe(priv->dev, int_in_addr[id]), int_data, sizeof(*int_data), f81604_read_int_callback, netdev, 1); int_urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(int_urb, &priv->urbs_anchor); ret = usb_submit_urb(int_urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(int_urb); usb_free_urb(int_urb); netdev_warn(netdev, "%s: submit int urb failed: %pe\n", __func__, ERR_PTR(ret)); goto error; } /* Drop reference, USB core will take care of freeing it */ usb_free_urb(int_urb); return 0; error: f81604_unregister_urbs(priv); return ret; } static int f81604_start(struct net_device *netdev) { struct f81604_port_priv *priv = netdev_priv(netdev); int ret; u8 mode; u8 tmp; mode = F81604_RX_AUTO_RELEASE_BUF | F81604_INT_WHEN_CHANGE; /* Set TR/AT mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) mode |= F81604_TX_ONESHOT; else mode |= F81604_TX_NORMAL; ret = 
f81604_sja1000_write(priv, F81604_CTRL_MODE_REG, mode); if (ret) return ret; /* set reset mode */ ret = f81604_set_reset_mode(priv); if (ret) return ret; ret = f81604_chipset_init(priv); if (ret) return ret; /* Clear error counters and error code capture */ ret = f81604_sja1000_write(priv, F81604_SJA1000_TXERR, 0); if (ret) return ret; ret = f81604_sja1000_write(priv, F81604_SJA1000_RXERR, 0); if (ret) return ret; /* Read clear for ECC/ALC/IR register */ ret = f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_IR, &tmp); if (ret) return ret; ret = f81604_register_urbs(priv); if (ret) return ret; ret = f81604_set_normal_mode(priv); if (ret) { f81604_unregister_urbs(priv); return ret; } return 0; } static int f81604_set_bittiming(struct net_device *dev) { struct f81604_port_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; int ret; btr0 = FIELD_PREP(F81604_BRP_MASK, bt->brp - 1) | FIELD_PREP(F81604_SJW_MASK, bt->sjw - 1); btr1 = FIELD_PREP(F81604_SEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | FIELD_PREP(F81604_SEG2_MASK, bt->phase_seg2 - 1); if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= F81604_SJA1000_BTR1_SAMPLE_TRIPLE; ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR0, btr0); if (ret) { netdev_warn(dev, "%s: Set BTR0 failed: %pe\n", __func__, ERR_PTR(ret)); return ret; } ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR1, btr1); if (ret) { netdev_warn(dev, "%s: Set BTR1 failed: %pe\n", __func__, ERR_PTR(ret)); return ret; } return 0; } static int f81604_set_mode(struct net_device *netdev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: ret = f81604_start(netdev); if (!ret && netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; default: ret = -EOPNOTSUPP; } return ret; } static void f81604_write_bulk_callback(struct urb *urb) { struct net_device *netdev = urb->context; if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: Tx URB error: %pe\n", __func__, ERR_PTR(urb->status)); } static void f81604_clear_reg_work(struct work_struct *work) { struct f81604_port_priv *priv; u8 tmp; priv = container_of(work, struct f81604_port_priv, clear_reg_work); /* dummy read for clear Arbitration lost capture(ALC) register. */ if (test_and_clear_bit(F81604_CLEAR_ALC, &priv->clear_flags)) f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); /* dummy read for clear Error code capture(ECC) register. */ if (test_and_clear_bit(F81604_CLEAR_ECC, &priv->clear_flags)) f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); /* dummy write for clear data overrun flag. 
*/ if (test_and_clear_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags)) f81604_sja1000_write(priv, F81604_SJA1000_CMR, F81604_SJA1000_CMD_CDO); } static netdev_tx_t f81604_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct can_frame *cf = (struct can_frame *)skb->data; struct f81604_port_priv *priv = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; struct f81604_can_frame *frame; struct urb *write_urb; int ret; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; netif_stop_queue(netdev); write_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!write_urb) goto nomem_urb; frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) goto nomem_buf; usb_fill_bulk_urb(write_urb, priv->dev, usb_sndbulkpipe(priv->dev, bulk_out_addr[netdev->dev_port]), frame, sizeof(*frame), f81604_write_bulk_callback, priv->netdev); write_urb->transfer_flags |= URB_FREE_BUFFER; frame->cmd = F81604_CMD_DATA; frame->dlc = cf->len; if (cf->can_id & CAN_RTR_FLAG) frame->dlc |= F81604_DLC_RTR_BIT; if (cf->can_id & CAN_EFF_FLAG) { u32 id = (cf->can_id & CAN_EFF_MASK) << F81604_EFF_SHIFT; put_unaligned_be32(id, &frame->eff.id); frame->dlc |= F81604_DLC_EFF_BIT; if (!(cf->can_id & CAN_RTR_FLAG)) memcpy(&frame->eff.data, cf->data, cf->len); } else { u32 id = (cf->can_id & CAN_SFF_MASK) << F81604_SFF_SHIFT; put_unaligned_be16(id, &frame->sff.id); if (!(cf->can_id & CAN_RTR_FLAG)) memcpy(&frame->sff.data, cf->data, cf->len); } can_put_echo_skb(skb, netdev, 0, 0); ret = usb_submit_urb(write_urb, GFP_ATOMIC); if (ret) { netdev_err(netdev, "%s: failed to resubmit tx bulk urb: %pe\n", __func__, ERR_PTR(ret)); can_free_echo_skb(netdev, 0, NULL); stats->tx_dropped++; stats->tx_errors++; if (ret == -ENODEV) netif_device_detach(netdev); else netif_wake_queue(netdev); } /* let usb core take care of this urb */ usb_free_urb(write_urb); return NETDEV_TX_OK; nomem_buf: usb_free_urb(write_urb); nomem_urb: dev_kfree_skb(skb); stats->tx_dropped++; stats->tx_errors++; netif_wake_queue(netdev); return NETDEV_TX_OK; } static int f81604_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct f81604_port_priv *priv = netdev_priv(netdev); u8 txerr, rxerr; int ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_TXERR, &txerr); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_RXERR, &rxerr); if (ret) return ret; bec->txerr = txerr; bec->rxerr = rxerr; return 0; } /* Open USB device */ static int f81604_open(struct net_device *netdev) { int ret; ret = open_candev(netdev); if (ret) return ret; ret = f81604_start(netdev); if (ret) { if (ret == -ENODEV) netif_device_detach(netdev); close_candev(netdev); return ret; } netif_start_queue(netdev); return 0; } /* Close USB device */ static int f81604_close(struct net_device *netdev) { struct f81604_port_priv *priv = netdev_priv(netdev); f81604_set_reset_mode(priv); netif_stop_queue(netdev); cancel_work_sync(&priv->clear_reg_work); close_candev(netdev); f81604_unregister_urbs(priv); return 0; } static const struct net_device_ops f81604_netdev_ops = { .ndo_open = f81604_open, .ndo_stop = f81604_close, .ndo_start_xmit = f81604_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct can_bittiming_const f81604_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; /* Called by the usb core when driver is unloaded or device is removed */ static void f81604_disconnect(struct usb_interface *intf) { struct 
f81604_priv *priv = usb_get_intfdata(intf); int i; for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { if (!priv->netdev[i]) continue; unregister_netdev(priv->netdev[i]); free_candev(priv->netdev[i]); } } static int __f81604_set_termination(struct usb_device *dev, int idx, u16 term) { u8 mask, data = 0; if (idx == 0) mask = F81604_CAN0_TERM; else mask = F81604_CAN1_TERM; if (term) data = mask; return f81604_update_bits(dev, F81604_TERMINATOR_REG, mask, data); } static int f81604_set_termination(struct net_device *netdev, u16 term) { struct f81604_port_priv *port_priv = netdev_priv(netdev); ASSERT_RTNL(); return __f81604_set_termination(port_priv->dev, netdev->dev_port, term); } static int f81604_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct net_device *netdev; struct f81604_priv *priv; int i, ret; priv = devm_kzalloc(&intf->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usb_set_intfdata(intf, priv); for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { ret = __f81604_set_termination(dev, i, 0); if (ret) { dev_err(&intf->dev, "Setting termination of CH#%d failed: %pe\n", i, ERR_PTR(ret)); return ret; } } for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { struct f81604_port_priv *port_priv; netdev = alloc_candev(sizeof(*port_priv), 1); if (!netdev) { dev_err(&intf->dev, "Couldn't alloc candev: %d\n", i); ret = -ENOMEM; goto failure_cleanup; } port_priv = netdev_priv(netdev); INIT_WORK(&port_priv->clear_reg_work, f81604_clear_reg_work); init_usb_anchor(&port_priv->urbs_anchor); port_priv->intf = intf; port_priv->dev = dev; port_priv->netdev = netdev; port_priv->can.clock.freq = F81604_CAN_CLOCK; port_priv->can.termination_const = f81604_termination; port_priv->can.termination_const_cnt = ARRAY_SIZE(f81604_termination); port_priv->can.bittiming_const = &f81604_bittiming_const; port_priv->can.do_set_bittiming = f81604_set_bittiming; port_priv->can.do_set_mode = f81604_set_mode; port_priv->can.do_set_termination = f81604_set_termination; port_priv->can.do_get_berr_counter = f81604_get_berr_counter; port_priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_PRESUME_ACK; netdev->ethtool_ops = &f81604_ethtool_ops; netdev->netdev_ops = &f81604_netdev_ops; netdev->flags |= IFF_ECHO; netdev->dev_port = i; SET_NETDEV_DEV(netdev, &intf->dev); ret = register_candev(netdev); if (ret) { netdev_err(netdev, "register CAN device failed: %pe\n", ERR_PTR(ret)); free_candev(netdev); goto failure_cleanup; } priv->netdev[i] = netdev; } return 0; failure_cleanup: f81604_disconnect(intf); return ret; } static struct usb_driver f81604_driver = { .name = KBUILD_MODNAME, .probe = f81604_probe, .disconnect = f81604_disconnect, .id_table = f81604_table, }; module_usb_driver(f81604_driver); MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) <peter_hong@fintek.com.tw>"); MODULE_DESCRIPTION("Fintek F81604 USB to 2xCANBUS"); MODULE_LICENSE("GPL");
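Both f81604_start_xmit() and f81604_process_rx_packet() above pack the CAN identifier into the top bits of a big-endian field, mirroring the SJA1000 buffer layout: an 11-bit standard ID is shifted by F81604_SFF_SHIFT (5) into a 16-bit word, a 29-bit extended ID by F81604_EFF_SHIFT (3) into a 32-bit word. A stand-alone sketch of that packing, with illustrative helper names that are not part of the driver:

/* Pack/unpack a CAN ID the way the F81604 bulk frame expects it.
 * Sketch only; masks correspond to CAN_SFF_MASK / CAN_EFF_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define SFF_SHIFT 5	/* F81604_SFF_SHIFT */
#define EFF_SHIFT 3	/* F81604_EFF_SHIFT */

static uint16_t pack_sff(uint16_t can_id)
{
	return (uint16_t)((can_id & 0x7FF) << SFF_SHIFT);
}

static uint32_t pack_eff(uint32_t can_id)
{
	return (can_id & 0x1FFFFFFFu) << EFF_SHIFT;
}

int main(void)
{
	uint16_t sff = pack_sff(0x123);
	uint32_t eff = pack_eff(0x1ABCDEF0);

	printf("SFF 0x123      -> packed 0x%04x -> back 0x%03x\n",
	       sff, sff >> SFF_SHIFT);
	printf("EFF 0x1ABCDEF0 -> packed 0x%08x -> back 0x%08x\n",
	       (unsigned)eff, (unsigned)(eff >> EFF_SHIFT));
	return 0;
}

In the driver the shifted value is then stored with put_unaligned_be16()/put_unaligned_be32(), so the most significant ID bits land in the first byte of the frame.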
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cls_cgroup.h		Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);

static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	if (in_interrupt())
		return 0;

	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}

static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
	u32 classid;

	classid = task_cls_classid(current);
	sock_cgroup_set_classid(skcd, classid);
}

static inline u32 __task_get_classid(struct task_struct *task)
{
	return task_cls_state(task)->classid;
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	u32 classid = __task_get_classid(current);

	/* Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* If there is a sock_cgroup_classid we'll use that. */
		if (!sk || !sk_fullsock(sk))
			return 0;

		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}

	return classid;
}
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */
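For context on how the classid consumed by task_get_classid() is usually produced: user space writes a 32-bit value to the cgroup's net_cls.classid file, conventionally laid out as 0xAAAABBBB with the tc major handle in the upper 16 bits and the minor in the lower 16 (per the cgroup-v1 net_cls documentation). A small illustrative sketch, not kernel code:

/* Compose/decompose a net_cls classid from a tc class handle (major:minor).
 * Helper name is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t make_classid(uint16_t major, uint16_t minor)
{
	return ((uint32_t)major << 16) | minor;
}

int main(void)
{
	uint32_t classid = make_classid(0x10, 0x1);	/* tc class 10:1 */

	printf("classid = 0x%08x (major %x, minor %x)\n",
	       (unsigned)classid, (unsigned)(classid >> 16),
	       (unsigned)(classid & 0xFFFF));
	return 0;
}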
1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 // SPDX-License-Identifier: GPL-2.0 /* * f2fs compress support * * Copyright (c) 2019 Chao Yu <chao@kernel.org> */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/moduleparam.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/lzo.h> #include <linux/lz4.h> #include <linux/zstd.h> #include <linux/pagevec.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include <trace/events/f2fs.h> static struct kmem_cache *cic_entry_slab; static struct kmem_cache *dic_entry_slab; static void *page_array_alloc(struct inode *inode, int nr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int size = sizeof(struct page *) * nr; if (likely(size <= sbi->page_array_slab_size)) return f2fs_kmem_cache_alloc(sbi->page_array_slab, GFP_F2FS_ZERO, false, F2FS_I_SB(inode)); return f2fs_kzalloc(sbi, size, GFP_NOFS); } static void page_array_free(struct inode *inode, void *pages, int nr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int size = sizeof(struct page *) * nr; if (!pages) return; if (likely(size <= sbi->page_array_slab_size)) kmem_cache_free(sbi->page_array_slab, pages); else kfree(pages); } struct f2fs_compress_ops { int (*init_compress_ctx)(struct compress_ctx *cc); void (*destroy_compress_ctx)(struct compress_ctx *cc); 
int (*compress_pages)(struct compress_ctx *cc); int (*init_decompress_ctx)(struct decompress_io_ctx *dic); void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic); int (*decompress_pages)(struct decompress_io_ctx *dic); bool (*is_level_valid)(int level); }; static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index) { return index & (cc->cluster_size - 1); } static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index) { return index >> cc->log_cluster_size; } static pgoff_t start_idx_of_cluster(struct compress_ctx *cc) { return cc->cluster_idx << cc->log_cluster_size; } bool f2fs_is_compressed_page(struct page *page) { if (!PagePrivate(page)) return false; if (!page_private(page)) return false; if (page_private_nonpointer(page)) return false; f2fs_bug_on(F2FS_M_SB(page->mapping), *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); return true; } static void f2fs_set_compressed_page(struct page *page, struct inode *inode, pgoff_t index, void *data) { struct folio *folio = page_folio(page); folio_attach_private(folio, (void *)data); /* i_crypto_info and iv index */ folio->index = index; folio->mapping = inode->i_mapping; } static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock) { int i; for (i = 0; i < len; i++) { if (!cc->rpages[i]) continue; if (unlock) unlock_page(cc->rpages[i]); else put_page(cc->rpages[i]); } } static void f2fs_put_rpages(struct compress_ctx *cc) { f2fs_drop_rpages(cc, cc->cluster_size, false); } static void f2fs_unlock_rpages(struct compress_ctx *cc, int len) { f2fs_drop_rpages(cc, len, true); } static void f2fs_put_rpages_wbc(struct compress_ctx *cc, struct writeback_control *wbc, bool redirty, int unlock) { unsigned int i; for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; if (redirty) redirty_page_for_writepage(wbc, cc->rpages[i]); f2fs_put_page(cc->rpages[i], unlock); } } struct page *f2fs_compress_control_page(struct page *page) { return ((struct compress_io_ctx *)page_private(page))->rpages[0]; } int f2fs_init_compress_ctx(struct compress_ctx *cc) { if (cc->rpages) return 0; cc->rpages = page_array_alloc(cc->inode, cc->cluster_size); return cc->rpages ? 
0 : -ENOMEM; } void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse) { page_array_free(cc->inode, cc->rpages, cc->cluster_size); cc->rpages = NULL; cc->nr_rpages = 0; cc->nr_cpages = 0; cc->valid_nr_cpages = 0; if (!reuse) cc->cluster_idx = NULL_CLUSTER; } void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct folio *folio) { unsigned int cluster_ofs; if (!f2fs_cluster_can_merge_page(cc, folio->index)) f2fs_bug_on(F2FS_I_SB(cc->inode), 1); cluster_ofs = offset_in_cluster(cc, folio->index); cc->rpages[cluster_ofs] = folio_page(folio, 0); cc->nr_rpages++; cc->cluster_idx = cluster_idx(cc, folio->index); } #ifdef CONFIG_F2FS_FS_LZO static int lzo_init_compress_ctx(struct compress_ctx *cc) { cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), LZO1X_MEM_COMPRESS, GFP_NOFS); if (!cc->private) return -ENOMEM; cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size); return 0; } static void lzo_destroy_compress_ctx(struct compress_ctx *cc) { kvfree(cc->private); cc->private = NULL; } static int lzo_compress_pages(struct compress_ctx *cc) { int ret; ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata, &cc->clen, cc->private); if (ret != LZO_E_OK) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "lzo compress failed, ret:%d", ret); return -EIO; } return 0; } static int lzo_decompress_pages(struct decompress_io_ctx *dic) { int ret; ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen, dic->rbuf, &dic->rlen); if (ret != LZO_E_OK) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "lzo decompress failed, ret:%d", ret); return -EIO; } if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "lzo invalid rlen:%zu, expected:%lu", dic->rlen, PAGE_SIZE << dic->log_cluster_size); return -EIO; } return 0; } static const struct f2fs_compress_ops f2fs_lzo_ops = { .init_compress_ctx = lzo_init_compress_ctx, .destroy_compress_ctx = lzo_destroy_compress_ctx, .compress_pages = lzo_compress_pages, .decompress_pages = lzo_decompress_pages, }; #endif #ifdef CONFIG_F2FS_FS_LZ4 static int lz4_init_compress_ctx(struct compress_ctx *cc) { unsigned int size = LZ4_MEM_COMPRESS; #ifdef CONFIG_F2FS_FS_LZ4HC if (F2FS_I(cc->inode)->i_compress_level) size = LZ4HC_MEM_COMPRESS; #endif cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS); if (!cc->private) return -ENOMEM; /* * we do not change cc->clen to LZ4_compressBound(inputsize) to * adapt worst compress case, because lz4 compressor can handle * output budget properly. 
*/ cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; return 0; } static void lz4_destroy_compress_ctx(struct compress_ctx *cc) { kvfree(cc->private); cc->private = NULL; } static int lz4_compress_pages(struct compress_ctx *cc) { int len = -EINVAL; unsigned char level = F2FS_I(cc->inode)->i_compress_level; if (!level) len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen, cc->clen, cc->private); #ifdef CONFIG_F2FS_FS_LZ4HC else len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen, cc->clen, level, cc->private); #endif if (len < 0) return len; if (!len) return -EAGAIN; cc->clen = len; return 0; } static int lz4_decompress_pages(struct decompress_io_ctx *dic) { int ret; ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf, dic->clen, dic->rlen); if (ret < 0) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "lz4 decompress failed, ret:%d", ret); return -EIO; } if (ret != PAGE_SIZE << dic->log_cluster_size) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "lz4 invalid ret:%d, expected:%lu", ret, PAGE_SIZE << dic->log_cluster_size); return -EIO; } return 0; } static bool lz4_is_level_valid(int lvl) { #ifdef CONFIG_F2FS_FS_LZ4HC return !lvl || (lvl >= LZ4HC_MIN_CLEVEL && lvl <= LZ4HC_MAX_CLEVEL); #else return lvl == 0; #endif } static const struct f2fs_compress_ops f2fs_lz4_ops = { .init_compress_ctx = lz4_init_compress_ctx, .destroy_compress_ctx = lz4_destroy_compress_ctx, .compress_pages = lz4_compress_pages, .decompress_pages = lz4_decompress_pages, .is_level_valid = lz4_is_level_valid, }; #endif #ifdef CONFIG_F2FS_FS_ZSTD static int zstd_init_compress_ctx(struct compress_ctx *cc) { zstd_parameters params; zstd_cstream *stream; void *workspace; unsigned int workspace_size; unsigned char level = F2FS_I(cc->inode)->i_compress_level; /* Need to remain this for backward compatibility */ if (!level) level = F2FS_ZSTD_DEFAULT_CLEVEL; params = zstd_get_params(level, cc->rlen); workspace_size = zstd_cstream_workspace_bound(&params.cParams); workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode), workspace_size, GFP_NOFS); if (!workspace) return -ENOMEM; stream = zstd_init_cstream(&params, 0, workspace, workspace_size); if (!stream) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "%s zstd_init_cstream failed", __func__); kvfree(workspace); return -EIO; } cc->private = workspace; cc->private2 = stream; cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE; return 0; } static void zstd_destroy_compress_ctx(struct compress_ctx *cc) { kvfree(cc->private); cc->private = NULL; cc->private2 = NULL; } static int zstd_compress_pages(struct compress_ctx *cc) { zstd_cstream *stream = cc->private2; zstd_in_buffer inbuf; zstd_out_buffer outbuf; int src_size = cc->rlen; int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE; int ret; inbuf.pos = 0; inbuf.src = cc->rbuf; inbuf.size = src_size; outbuf.pos = 0; outbuf.dst = cc->cbuf->cdata; outbuf.size = dst_size; ret = zstd_compress_stream(stream, &outbuf, &inbuf); if (zstd_is_error(ret)) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "%s zstd_compress_stream failed, ret: %d", __func__, zstd_get_error_code(ret)); return -EIO; } ret = zstd_end_stream(stream, &outbuf); if (zstd_is_error(ret)) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "%s zstd_end_stream returned %d", __func__, zstd_get_error_code(ret)); return -EIO; } /* * there is compressed data remained in intermediate buffer due to * no more space in cbuf.cdata */ if (ret) return -EAGAIN; cc->clen = outbuf.pos; return 0; } static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic) { zstd_dstream 
*stream; void *workspace; unsigned int workspace_size; unsigned int max_window_size = MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size); workspace_size = zstd_dstream_workspace_bound(max_window_size); workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode), workspace_size, GFP_NOFS); if (!workspace) return -ENOMEM; stream = zstd_init_dstream(max_window_size, workspace, workspace_size); if (!stream) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "%s zstd_init_dstream failed", __func__); kvfree(workspace); return -EIO; } dic->private = workspace; dic->private2 = stream; return 0; } static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic) { kvfree(dic->private); dic->private = NULL; dic->private2 = NULL; } static int zstd_decompress_pages(struct decompress_io_ctx *dic) { zstd_dstream *stream = dic->private2; zstd_in_buffer inbuf; zstd_out_buffer outbuf; int ret; inbuf.pos = 0; inbuf.src = dic->cbuf->cdata; inbuf.size = dic->clen; outbuf.pos = 0; outbuf.dst = dic->rbuf; outbuf.size = dic->rlen; ret = zstd_decompress_stream(stream, &outbuf, &inbuf); if (zstd_is_error(ret)) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "%s zstd_decompress_stream failed, ret: %d", __func__, zstd_get_error_code(ret)); return -EIO; } if (dic->rlen != outbuf.pos) { f2fs_err_ratelimited(F2FS_I_SB(dic->inode), "%s ZSTD invalid rlen:%zu, expected:%lu", __func__, dic->rlen, PAGE_SIZE << dic->log_cluster_size); return -EIO; } return 0; } static bool zstd_is_level_valid(int lvl) { return lvl >= zstd_min_clevel() && lvl <= zstd_max_clevel(); } static const struct f2fs_compress_ops f2fs_zstd_ops = { .init_compress_ctx = zstd_init_compress_ctx, .destroy_compress_ctx = zstd_destroy_compress_ctx, .compress_pages = zstd_compress_pages, .init_decompress_ctx = zstd_init_decompress_ctx, .destroy_decompress_ctx = zstd_destroy_decompress_ctx, .decompress_pages = zstd_decompress_pages, .is_level_valid = zstd_is_level_valid, }; #endif #ifdef CONFIG_F2FS_FS_LZO #ifdef CONFIG_F2FS_FS_LZORLE static int lzorle_compress_pages(struct compress_ctx *cc) { int ret; ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata, &cc->clen, cc->private); if (ret != LZO_E_OK) { f2fs_err_ratelimited(F2FS_I_SB(cc->inode), "lzo-rle compress failed, ret:%d", ret); return -EIO; } return 0; } static const struct f2fs_compress_ops f2fs_lzorle_ops = { .init_compress_ctx = lzo_init_compress_ctx, .destroy_compress_ctx = lzo_destroy_compress_ctx, .compress_pages = lzorle_compress_pages, .decompress_pages = lzo_decompress_pages, }; #endif #endif static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = { #ifdef CONFIG_F2FS_FS_LZO &f2fs_lzo_ops, #else NULL, #endif #ifdef CONFIG_F2FS_FS_LZ4 &f2fs_lz4_ops, #else NULL, #endif #ifdef CONFIG_F2FS_FS_ZSTD &f2fs_zstd_ops, #else NULL, #endif #if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE) &f2fs_lzorle_ops, #else NULL, #endif }; bool f2fs_is_compress_backend_ready(struct inode *inode) { if (!f2fs_compressed_file(inode)) return true; return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; } bool f2fs_is_compress_level_valid(int alg, int lvl) { const struct f2fs_compress_ops *cops = f2fs_cops[alg]; if (cops->is_level_valid) return cops->is_level_valid(lvl); return lvl == 0; } static mempool_t *compress_page_pool; static int num_compress_pages = 512; module_param(num_compress_pages, uint, 0444); MODULE_PARM_DESC(num_compress_pages, "Number of intermediate compress pages to preallocate"); int __init f2fs_init_compress_mempool(void) { compress_page_pool = 
mempool_create_page_pool(num_compress_pages, 0); return compress_page_pool ? 0 : -ENOMEM; } void f2fs_destroy_compress_mempool(void) { mempool_destroy(compress_page_pool); } static struct page *f2fs_compress_alloc_page(void) { struct page *page; page = mempool_alloc(compress_page_pool, GFP_NOFS); lock_page(page); return page; } static void f2fs_compress_free_page(struct page *page) { if (!page) return; detach_page_private(page); page->mapping = NULL; unlock_page(page); mempool_free(page, compress_page_pool); } #define MAX_VMAP_RETRIES 3 static void *f2fs_vmap(struct page **pages, unsigned int count) { int i; void *buf = NULL; for (i = 0; i < MAX_VMAP_RETRIES; i++) { buf = vm_map_ram(pages, count, -1); if (buf) break; vm_unmap_aliases(); } return buf; } static int f2fs_compress_pages(struct compress_ctx *cc) { struct f2fs_inode_info *fi = F2FS_I(cc->inode); const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; unsigned int max_len, new_nr_cpages; u32 chksum = 0; int i, ret; trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx, cc->cluster_size, fi->i_compress_algorithm); if (cops->init_compress_ctx) { ret = cops->init_compress_ctx(cc); if (ret) goto out; } max_len = COMPRESS_HEADER_SIZE + cc->clen; cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE); cc->valid_nr_cpages = cc->nr_cpages; cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages); if (!cc->cpages) { ret = -ENOMEM; goto destroy_compress_ctx; } for (i = 0; i < cc->nr_cpages; i++) cc->cpages[i] = f2fs_compress_alloc_page(); cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size); if (!cc->rbuf) { ret = -ENOMEM; goto out_free_cpages; } cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages); if (!cc->cbuf) { ret = -ENOMEM; goto out_vunmap_rbuf; } ret = cops->compress_pages(cc); if (ret) goto out_vunmap_cbuf; max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE; if (cc->clen > max_len) { ret = -EAGAIN; goto out_vunmap_cbuf; } cc->cbuf->clen = cpu_to_le32(cc->clen); if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM)) chksum = f2fs_crc32(F2FS_I_SB(cc->inode), cc->cbuf->cdata, cc->clen); cc->cbuf->chksum = cpu_to_le32(chksum); for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++) cc->cbuf->reserved[i] = cpu_to_le32(0); new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE); /* zero out any unused part of the last page */ memset(&cc->cbuf->cdata[cc->clen], 0, (new_nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE)); vm_unmap_ram(cc->cbuf, cc->nr_cpages); vm_unmap_ram(cc->rbuf, cc->cluster_size); for (i = new_nr_cpages; i < cc->nr_cpages; i++) { f2fs_compress_free_page(cc->cpages[i]); cc->cpages[i] = NULL; } if (cops->destroy_compress_ctx) cops->destroy_compress_ctx(cc); cc->valid_nr_cpages = new_nr_cpages; trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, cc->clen, ret); return 0; out_vunmap_cbuf: vm_unmap_ram(cc->cbuf, cc->nr_cpages); out_vunmap_rbuf: vm_unmap_ram(cc->rbuf, cc->cluster_size); out_free_cpages: for (i = 0; i < cc->nr_cpages; i++) { if (cc->cpages[i]) f2fs_compress_free_page(cc->cpages[i]); } page_array_free(cc->inode, cc->cpages, cc->nr_cpages); cc->cpages = NULL; destroy_compress_ctx: if (cops->destroy_compress_ctx) cops->destroy_compress_ctx(cc); out: trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx, cc->clen, ret); return ret; } static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool pre_alloc); static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, bool bypass_destroy_callback, bool pre_alloc); void f2fs_decompress_cluster(struct 
decompress_io_ctx *dic, bool in_task) { struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); struct f2fs_inode_info *fi = F2FS_I(dic->inode); const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; bool bypass_callback = false; int ret; trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx, dic->cluster_size, fi->i_compress_algorithm); if (dic->failed) { ret = -EIO; goto out_end_io; } ret = f2fs_prepare_decomp_mem(dic, false); if (ret) { bypass_callback = true; goto out_release; } dic->clen = le32_to_cpu(dic->cbuf->clen); dic->rlen = PAGE_SIZE << dic->log_cluster_size; if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) { ret = -EFSCORRUPTED; /* Avoid f2fs_commit_super in irq context */ if (!in_task) f2fs_handle_error_async(sbi, ERROR_FAIL_DECOMPRESSION); else f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION); goto out_release; } ret = cops->decompress_pages(dic); if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) { u32 provided = le32_to_cpu(dic->cbuf->chksum); u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen); if (provided != calculated) { if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) { set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT); f2fs_info_ratelimited(sbi, "checksum invalid, nid = %lu, %x vs %x", dic->inode->i_ino, provided, calculated); } set_sbi_flag(sbi, SBI_NEED_FSCK); } } out_release: f2fs_release_decomp_mem(dic, bypass_callback, false); out_end_io: trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, dic->clen, ret); f2fs_decompress_end_io(dic, ret, in_task); } /* * This is called when a page of a compressed cluster has been read from disk * (or failed to be read from disk). It checks whether this page was the last * page being waited on in the cluster, and if so, it decompresses the cluster * (or in the case of a failure, cleans up without actually decompressing). */ void f2fs_end_read_compressed_page(struct page *page, bool failed, block_t blkaddr, bool in_task) { struct decompress_io_ctx *dic = (struct decompress_io_ctx *)page_private(page); struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); dec_page_count(sbi, F2FS_RD_DATA); if (failed) WRITE_ONCE(dic->failed, true); else if (blkaddr && in_task) f2fs_cache_compressed_page(sbi, page, dic->inode->i_ino, blkaddr); if (atomic_dec_and_test(&dic->remaining_pages)) f2fs_decompress_cluster(dic, in_task); } static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index) { if (cc->cluster_idx == NULL_CLUSTER) return true; return cc->cluster_idx == cluster_idx(cc, index); } bool f2fs_cluster_is_empty(struct compress_ctx *cc) { return cc->nr_rpages == 0; } static bool f2fs_cluster_is_full(struct compress_ctx *cc) { return cc->cluster_size == cc->nr_rpages; } bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index) { if (f2fs_cluster_is_empty(cc)) return true; return is_page_in_cluster(cc, index); } bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, int index, int nr_pages, bool uptodate) { unsigned long pgidx = pages[index]->index; int i = uptodate ? 0 : 1; /* * when uptodate set to true, try to check all pages in cluster is * uptodate or not. 
*/ if (uptodate && (pgidx % cc->cluster_size)) return false; if (nr_pages - index < cc->cluster_size) return false; for (; i < cc->cluster_size; i++) { if (pages[index + i]->index != pgidx + i) return false; if (uptodate && !PageUptodate(pages[index + i])) return false; } return true; } static bool cluster_has_invalid_data(struct compress_ctx *cc) { loff_t i_size = i_size_read(cc->inode); unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE); int i; for (i = 0; i < cc->cluster_size; i++) { struct page *page = cc->rpages[i]; f2fs_bug_on(F2FS_I_SB(cc->inode), !page); /* beyond EOF */ if (page_folio(page)->index >= nr_pages) return true; } return false; } bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { #ifdef CONFIG_F2FS_CHECK_FS struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size; int cluster_end = 0; unsigned int count; int i; char *reason = ""; if (dn->data_blkaddr != COMPRESS_ADDR) return false; /* [..., COMPR_ADDR, ...] */ if (dn->ofs_in_node % cluster_size) { reason = "[*|C|*|*]"; goto out; } for (i = 1, count = 1; i < cluster_size; i++, count++) { block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i); /* [COMPR_ADDR, ..., COMPR_ADDR] */ if (blkaddr == COMPRESS_ADDR) { reason = "[C|*|C|*]"; goto out; } if (!__is_valid_data_blkaddr(blkaddr)) { if (!cluster_end) cluster_end = i; continue; } /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */ if (cluster_end) { reason = "[C|N|N|V]"; goto out; } } f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size && !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED)); return false; out: f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s", dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason); set_sbi_flag(sbi, SBI_NEED_FSCK); return true; #else return false; #endif } static int __f2fs_get_cluster_blocks(struct inode *inode, struct dnode_of_data *dn) { unsigned int cluster_size = F2FS_I(inode)->i_cluster_size; int count, i; for (i = 0, count = 0; i < cluster_size; i++) { block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i); if (__is_valid_data_blkaddr(blkaddr)) count++; } return count; } static int __f2fs_cluster_blocks(struct inode *inode, unsigned int cluster_idx, enum cluster_check_type type) { struct dnode_of_data dn; unsigned int start_idx = cluster_idx << F2FS_I(inode)->i_log_cluster_size; int ret; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); if (ret) { if (ret == -ENOENT) ret = 0; goto fail; } if (f2fs_sanity_check_cluster(&dn)) { ret = -EFSCORRUPTED; goto fail; } if (dn.data_blkaddr == COMPRESS_ADDR) { if (type == CLUSTER_COMPR_BLKS) ret = 1 + __f2fs_get_cluster_blocks(inode, &dn); else if (type == CLUSTER_IS_COMPR) ret = 1; } else if (type == CLUSTER_RAW_BLKS) { ret = __f2fs_get_cluster_blocks(inode, &dn); } fail: f2fs_put_dnode(&dn); return ret; } /* return # of compressed blocks in compressed cluster */ static int f2fs_compressed_blocks(struct compress_ctx *cc) { return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, CLUSTER_COMPR_BLKS); } /* return # of raw blocks in non-compressed cluster */ static int f2fs_decompressed_blocks(struct inode *inode, unsigned int cluster_idx) { return __f2fs_cluster_blocks(inode, cluster_idx, CLUSTER_RAW_BLKS); } /* return whether cluster is compressed one or not */ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) { return __f2fs_cluster_blocks(inode, index >> 
F2FS_I(inode)->i_log_cluster_size, CLUSTER_IS_COMPR); } /* return whether cluster contains non raw blocks or not */ bool f2fs_is_sparse_cluster(struct inode *inode, pgoff_t index) { unsigned int cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size; return f2fs_decompressed_blocks(inode, cluster_idx) != F2FS_I(inode)->i_cluster_size; } static bool cluster_may_compress(struct compress_ctx *cc) { if (!f2fs_need_compress_data(cc->inode)) return false; if (f2fs_is_atomic_file(cc->inode)) return false; if (!f2fs_cluster_is_full(cc)) return false; if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode)))) return false; return !cluster_has_invalid_data(cc); } static void set_cluster_writeback(struct compress_ctx *cc) { int i; for (i = 0; i < cc->cluster_size; i++) { if (cc->rpages[i]) set_page_writeback(cc->rpages[i]); } } static void cancel_cluster_writeback(struct compress_ctx *cc, struct compress_io_ctx *cic, int submitted) { int i; /* Wait for submitted IOs. */ if (submitted > 1) { f2fs_submit_merged_write(F2FS_I_SB(cc->inode), DATA); while (atomic_read(&cic->pending_pages) != (cc->valid_nr_cpages - submitted + 1)) f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); } /* Cancel writeback and stay locked. */ for (i = 0; i < cc->cluster_size; i++) { if (i < submitted) { inode_inc_dirty_pages(cc->inode); lock_page(cc->rpages[i]); } clear_page_private_gcing(cc->rpages[i]); if (folio_test_writeback(page_folio(cc->rpages[i]))) end_page_writeback(cc->rpages[i]); } } static void set_cluster_dirty(struct compress_ctx *cc) { int i; for (i = 0; i < cc->cluster_size; i++) if (cc->rpages[i]) { set_page_dirty(cc->rpages[i]); set_page_private_gcing(cc->rpages[i]); } } static int prepare_compress_overwrite(struct compress_ctx *cc, struct page **pagep, pgoff_t index, void **fsdata) { struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); struct address_space *mapping = cc->inode->i_mapping; struct page *page; sector_t last_block_in_bio; fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT; pgoff_t start_idx = start_idx_of_cluster(cc); int i, ret; retry: ret = f2fs_is_compressed_cluster(cc->inode, start_idx); if (ret <= 0) return ret; ret = f2fs_init_compress_ctx(cc); if (ret) return ret; /* keep page reference to avoid page reclaim */ for (i = 0; i < cc->cluster_size; i++) { page = f2fs_pagecache_get_page(mapping, start_idx + i, fgp_flag, GFP_NOFS); if (!page) { ret = -ENOMEM; goto unlock_pages; } if (PageUptodate(page)) f2fs_put_page(page, 1); else f2fs_compress_ctx_add_page(cc, page_folio(page)); } if (!f2fs_cluster_is_empty(cc)) { struct bio *bio = NULL; ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size, &last_block_in_bio, NULL, true); f2fs_put_rpages(cc); f2fs_destroy_compress_ctx(cc, true); if (ret) goto out; if (bio) f2fs_submit_read_bio(sbi, bio, DATA); ret = f2fs_init_compress_ctx(cc); if (ret) goto out; } for (i = 0; i < cc->cluster_size; i++) { f2fs_bug_on(sbi, cc->rpages[i]); page = find_lock_page(mapping, start_idx + i); if (!page) { /* page can be truncated */ goto release_and_retry; } f2fs_wait_on_page_writeback(page, DATA, true, true); f2fs_compress_ctx_add_page(cc, page_folio(page)); if (!PageUptodate(page)) { release_and_retry: f2fs_put_rpages(cc); f2fs_unlock_rpages(cc, i + 1); f2fs_destroy_compress_ctx(cc, true); goto retry; } } if (likely(!ret)) { *fsdata = cc->rpages; *pagep = cc->rpages[offset_in_cluster(cc, index)]; return cc->cluster_size; } unlock_pages: f2fs_put_rpages(cc); f2fs_unlock_rpages(cc, i); f2fs_destroy_compress_ctx(cc, true); out: return ret; } int f2fs_prepare_compress_overwrite(struct 
inode *inode, struct page **pagep, pgoff_t index, void **fsdata) { struct compress_ctx cc = { .inode = inode, .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, .cluster_size = F2FS_I(inode)->i_cluster_size, .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, .rpages = NULL, .nr_rpages = 0, }; return prepare_compress_overwrite(&cc, pagep, index, fsdata); } bool f2fs_compress_write_end(struct inode *inode, void *fsdata, pgoff_t index, unsigned copied) { struct compress_ctx cc = { .inode = inode, .log_cluster_size = F2FS_I(inode)->i_log_cluster_size, .cluster_size = F2FS_I(inode)->i_cluster_size, .rpages = fsdata, }; bool first_index = (index == cc.rpages[0]->index); if (copied) set_cluster_dirty(&cc); f2fs_put_rpages_wbc(&cc, NULL, false, 1); f2fs_destroy_compress_ctx(&cc, false); return first_index; } int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock) { void *fsdata = NULL; struct page *pagep; int log_cluster_size = F2FS_I(inode)->i_log_cluster_size; pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) << log_cluster_size; int err; err = f2fs_is_compressed_cluster(inode, start_idx); if (err < 0) return err; /* truncate normal cluster */ if (!err) return f2fs_do_truncate_blocks(inode, from, lock); /* truncate compressed cluster */ err = f2fs_prepare_compress_overwrite(inode, &pagep, start_idx, &fsdata); /* should not be a normal cluster */ f2fs_bug_on(F2FS_I_SB(inode), err == 0); if (err <= 0) return err; if (err > 0) { struct page **rpages = fsdata; int cluster_size = F2FS_I(inode)->i_cluster_size; int i; for (i = cluster_size - 1; i >= 0; i--) { loff_t start = rpages[i]->index << PAGE_SHIFT; if (from <= start) { zero_user_segment(rpages[i], 0, PAGE_SIZE); } else { zero_user_segment(rpages[i], from - start, PAGE_SIZE); break; } } f2fs_compress_write_end(inode, fsdata, start_idx, true); } return 0; } static int f2fs_write_compressed_pages(struct compress_ctx *cc, int *submitted, struct writeback_control *wbc, enum iostat_type io_type) { struct inode *inode = cc->inode; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_io_info fio = { .sbi = sbi, .ino = cc->inode->i_ino, .type = DATA, .op = REQ_OP_WRITE, .op_flags = wbc_to_write_flags(wbc), .old_blkaddr = NEW_ADDR, .page = NULL, .encrypted_page = NULL, .compressed_page = NULL, .io_type = io_type, .io_wbc = wbc, .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ? 1 : 0, }; struct dnode_of_data dn; struct node_info ni; struct compress_io_ctx *cic; pgoff_t start_idx = start_idx_of_cluster(cc); unsigned int last_index = cc->cluster_size - 1; loff_t psize; int i, err; bool quota_inode = IS_NOQUOTA(inode); /* we should bypass data pages to proceed the kworker jobs */ if (unlikely(f2fs_cp_error(sbi))) { mapping_set_error(cc->rpages[0]->mapping, -EIO); goto out_free; } if (quota_inode) { /* * We need to wait for node_write to avoid block allocation during * checkpoint. This can only happen to quota writes which can cause * the below discard race condition. 
*/ f2fs_down_read(&sbi->node_write); } else if (!f2fs_trylock_op(sbi)) { goto out_free; } set_new_dnode(&dn, cc->inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); if (err) goto out_unlock_op; for (i = 0; i < cc->cluster_size; i++) { if (data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i) == NULL_ADDR) goto out_put_dnode; } psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT; err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); if (err) goto out_put_dnode; fio.version = ni.version; cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi); if (!cic) goto out_put_dnode; cic->magic = F2FS_COMPRESSED_PAGE_MAGIC; cic->inode = inode; atomic_set(&cic->pending_pages, cc->valid_nr_cpages); cic->rpages = page_array_alloc(cc->inode, cc->cluster_size); if (!cic->rpages) goto out_put_cic; cic->nr_rpages = cc->cluster_size; for (i = 0; i < cc->valid_nr_cpages; i++) { f2fs_set_compressed_page(cc->cpages[i], inode, cc->rpages[i + 1]->index, cic); fio.compressed_page = cc->cpages[i]; fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, fio.old_blkaddr); if (fio.encrypted) { fio.page = cc->rpages[i + 1]; err = f2fs_encrypt_one_page(&fio); if (err) goto out_destroy_crypt; cc->cpages[i] = fio.encrypted_page; } } set_cluster_writeback(cc); for (i = 0; i < cc->cluster_size; i++) cic->rpages[i] = cc->rpages[i]; for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) { block_t blkaddr; blkaddr = f2fs_data_blkaddr(&dn); fio.page = cc->rpages[i]; fio.old_blkaddr = blkaddr; /* cluster header */ if (i == 0) { if (blkaddr == COMPRESS_ADDR) fio.compr_blocks++; if (__is_valid_data_blkaddr(blkaddr)) f2fs_invalidate_blocks(sbi, blkaddr); f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR); goto unlock_continue; } if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr)) fio.compr_blocks++; if (i > cc->valid_nr_cpages) { if (__is_valid_data_blkaddr(blkaddr)) { f2fs_invalidate_blocks(sbi, blkaddr); f2fs_update_data_blkaddr(&dn, NEW_ADDR); } goto unlock_continue; } f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR); if (fio.encrypted) fio.encrypted_page = cc->cpages[i - 1]; else fio.compressed_page = cc->cpages[i - 1]; cc->cpages[i - 1] = NULL; fio.submitted = 0; f2fs_outplace_write_data(&dn, &fio); if (unlikely(!fio.submitted)) { cancel_cluster_writeback(cc, cic, i); /* To call fscrypt_finalize_bounce_page */ i = cc->valid_nr_cpages; *submitted = 0; goto out_destroy_crypt; } (*submitted)++; unlock_continue: inode_dec_dirty_pages(cc->inode); unlock_page(fio.page); } if (fio.compr_blocks) f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false); f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true); add_compr_block_stat(inode, cc->valid_nr_cpages); set_inode_flag(cc->inode, FI_APPEND_WRITE); f2fs_put_dnode(&dn); if (quota_inode) f2fs_up_read(&sbi->node_write); else f2fs_unlock_op(sbi); spin_lock(&fi->i_size_lock); if (fi->last_disk_size < psize) fi->last_disk_size = psize; spin_unlock(&fi->i_size_lock); f2fs_put_rpages(cc); page_array_free(cc->inode, cc->cpages, cc->nr_cpages); cc->cpages = NULL; f2fs_destroy_compress_ctx(cc, false); return 0; out_destroy_crypt: page_array_free(cc->inode, cic->rpages, cc->cluster_size); for (--i; i >= 0; i--) { if (!cc->cpages[i]) continue; fscrypt_finalize_bounce_page(&cc->cpages[i]); } out_put_cic: kmem_cache_free(cic_entry_slab, cic); out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: if (quota_inode) 
f2fs_up_read(&sbi->node_write); else f2fs_unlock_op(sbi); out_free: for (i = 0; i < cc->valid_nr_cpages; i++) { f2fs_compress_free_page(cc->cpages[i]); cc->cpages[i] = NULL; } page_array_free(cc->inode, cc->cpages, cc->nr_cpages); cc->cpages = NULL; return -EAGAIN; } void f2fs_compress_write_end_io(struct bio *bio, struct page *page) { struct f2fs_sb_info *sbi = bio->bi_private; struct compress_io_ctx *cic = (struct compress_io_ctx *)page_private(page); enum count_type type = WB_DATA_TYPE(page, f2fs_is_compressed_page(page)); int i; if (unlikely(bio->bi_status)) mapping_set_error(cic->inode->i_mapping, -EIO); f2fs_compress_free_page(page); dec_page_count(sbi, type); if (atomic_dec_return(&cic->pending_pages)) return; for (i = 0; i < cic->nr_rpages; i++) { WARN_ON(!cic->rpages[i]); clear_page_private_gcing(cic->rpages[i]); end_page_writeback(cic->rpages[i]); } page_array_free(cic->inode, cic->rpages, cic->nr_rpages); kmem_cache_free(cic_entry_slab, cic); } static int f2fs_write_raw_pages(struct compress_ctx *cc, int *submitted_p, struct writeback_control *wbc, enum iostat_type io_type) { struct address_space *mapping = cc->inode->i_mapping; struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); int submitted, compr_blocks, i; int ret = 0; compr_blocks = f2fs_compressed_blocks(cc); for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; redirty_page_for_writepage(wbc, cc->rpages[i]); unlock_page(cc->rpages[i]); } if (compr_blocks < 0) return compr_blocks; /* overwrite compressed cluster w/ normal cluster */ if (compr_blocks > 0) f2fs_lock_op(sbi); for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; retry_write: lock_page(cc->rpages[i]); if (cc->rpages[i]->mapping != mapping) { continue_unlock: unlock_page(cc->rpages[i]); continue; } if (!PageDirty(cc->rpages[i])) goto continue_unlock; if (folio_test_writeback(page_folio(cc->rpages[i]))) { if (wbc->sync_mode == WB_SYNC_NONE) goto continue_unlock; f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true); } if (!clear_page_dirty_for_io(cc->rpages[i])) goto continue_unlock; ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]), &submitted, NULL, NULL, wbc, io_type, compr_blocks, false); if (ret) { if (ret == AOP_WRITEPAGE_ACTIVATE) { unlock_page(cc->rpages[i]); ret = 0; } else if (ret == -EAGAIN) { ret = 0; /* * for quota file, just redirty left pages to * avoid deadlock caused by cluster update race * from foreground operation. 
*/ if (IS_NOQUOTA(cc->inode)) goto out; f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); goto retry_write; } goto out; } *submitted_p += submitted; } out: if (compr_blocks > 0) f2fs_unlock_op(sbi); f2fs_balance_fs(sbi, true); return ret; } int f2fs_write_multi_pages(struct compress_ctx *cc, int *submitted, struct writeback_control *wbc, enum iostat_type io_type) { int err; *submitted = 0; if (cluster_may_compress(cc)) { err = f2fs_compress_pages(cc); if (err == -EAGAIN) { add_compr_block_stat(cc->inode, cc->cluster_size); goto write; } else if (err) { f2fs_put_rpages_wbc(cc, wbc, true, 1); goto destroy_out; } err = f2fs_write_compressed_pages(cc, submitted, wbc, io_type); if (!err) return 0; f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN); } write: f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted); err = f2fs_write_raw_pages(cc, submitted, wbc, io_type); f2fs_put_rpages_wbc(cc, wbc, false, 0); destroy_out: f2fs_destroy_compress_ctx(cc, false); return err; } static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi, bool pre_alloc) { return pre_alloc ^ f2fs_low_mem_mode(sbi); } static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic, bool pre_alloc) { const struct f2fs_compress_ops *cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; int i; if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) return 0; dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); if (!dic->tpages) return -ENOMEM; for (i = 0; i < dic->cluster_size; i++) { if (dic->rpages[i]) { dic->tpages[i] = dic->rpages[i]; continue; } dic->tpages[i] = f2fs_compress_alloc_page(); } dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size); if (!dic->rbuf) return -ENOMEM; dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages); if (!dic->cbuf) return -ENOMEM; if (cops->init_decompress_ctx) return cops->init_decompress_ctx(dic); return 0; } static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic, bool bypass_destroy_callback, bool pre_alloc) { const struct f2fs_compress_ops *cops = f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm]; if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc)) return; if (!bypass_destroy_callback && cops->destroy_decompress_ctx) cops->destroy_decompress_ctx(dic); if (dic->cbuf) vm_unmap_ram(dic->cbuf, dic->nr_cpages); if (dic->rbuf) vm_unmap_ram(dic->rbuf, dic->cluster_size); } static void f2fs_free_dic(struct decompress_io_ctx *dic, bool bypass_destroy_callback); struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) { struct decompress_io_ctx *dic; pgoff_t start_idx = start_idx_of_cluster(cc); struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode); int i, ret; dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi); if (!dic) return ERR_PTR(-ENOMEM); dic->rpages = page_array_alloc(cc->inode, cc->cluster_size); if (!dic->rpages) { kmem_cache_free(dic_entry_slab, dic); return ERR_PTR(-ENOMEM); } dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; dic->inode = cc->inode; atomic_set(&dic->remaining_pages, cc->nr_cpages); dic->cluster_idx = cc->cluster_idx; dic->cluster_size = cc->cluster_size; dic->log_cluster_size = cc->log_cluster_size; dic->nr_cpages = cc->nr_cpages; refcount_set(&dic->refcnt, 1); dic->failed = false; dic->need_verity = f2fs_need_verity(cc->inode, start_idx); for (i = 0; i < dic->cluster_size; i++) dic->rpages[i] = cc->rpages[i]; dic->nr_rpages = cc->cluster_size; dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages); if (!dic->cpages) { ret = -ENOMEM; goto out_free; } for (i = 0; i < dic->nr_cpages; i++) { struct 
page *page; page = f2fs_compress_alloc_page(); f2fs_set_compressed_page(page, cc->inode, start_idx + i + 1, dic); dic->cpages[i] = page; } ret = f2fs_prepare_decomp_mem(dic, true); if (ret) goto out_free; return dic; out_free: f2fs_free_dic(dic, true); return ERR_PTR(ret); } static void f2fs_free_dic(struct decompress_io_ctx *dic, bool bypass_destroy_callback) { int i; f2fs_release_decomp_mem(dic, bypass_destroy_callback, true); if (dic->tpages) { for (i = 0; i < dic->cluster_size; i++) { if (dic->rpages[i]) continue; if (!dic->tpages[i]) continue; f2fs_compress_free_page(dic->tpages[i]); } page_array_free(dic->inode, dic->tpages, dic->cluster_size); } if (dic->cpages) { for (i = 0; i < dic->nr_cpages; i++) { if (!dic->cpages[i]) continue; f2fs_compress_free_page(dic->cpages[i]); } page_array_free(dic->inode, dic->cpages, dic->nr_cpages); } page_array_free(dic->inode, dic->rpages, dic->nr_rpages); kmem_cache_free(dic_entry_slab, dic); } static void f2fs_late_free_dic(struct work_struct *work) { struct decompress_io_ctx *dic = container_of(work, struct decompress_io_ctx, free_work); f2fs_free_dic(dic, false); } static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task) { if (refcount_dec_and_test(&dic->refcnt)) { if (in_task) { f2fs_free_dic(dic, false); } else { INIT_WORK(&dic->free_work, f2fs_late_free_dic); queue_work(F2FS_I_SB(dic->inode)->post_read_wq, &dic->free_work); } } } static void f2fs_verify_cluster(struct work_struct *work) { struct decompress_io_ctx *dic = container_of(work, struct decompress_io_ctx, verity_work); int i; /* Verify, update, and unlock the decompressed pages. */ for (i = 0; i < dic->cluster_size; i++) { struct page *rpage = dic->rpages[i]; if (!rpage) continue; if (fsverity_verify_page(rpage)) SetPageUptodate(rpage); else ClearPageUptodate(rpage); unlock_page(rpage); } f2fs_put_dic(dic, true); } /* * This is called when a compressed cluster has been decompressed * (or failed to be read and/or decompressed). */ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, bool in_task) { int i; if (!failed && dic->need_verity) { /* * Note that to avoid deadlocks, the verity work can't be done * on the decompression workqueue. This is because verifying * the data pages can involve reading metadata pages from the * file, and these metadata pages may be compressed. */ INIT_WORK(&dic->verity_work, f2fs_verify_cluster); fsverity_enqueue_verify_work(&dic->verity_work); return; } /* Update and unlock the cluster's pagecache pages. */ for (i = 0; i < dic->cluster_size; i++) { struct page *rpage = dic->rpages[i]; if (!rpage) continue; if (failed) ClearPageUptodate(rpage); else SetPageUptodate(rpage); unlock_page(rpage); } /* * Release the reference to the decompress_io_ctx that was being held * for I/O completion. */ f2fs_put_dic(dic, in_task); } /* * Put a reference to a compressed page's decompress_io_ctx. * * This is called when the page is no longer needed and can be freed. */ void f2fs_put_page_dic(struct page *page, bool in_task) { struct decompress_io_ctx *dic = (struct decompress_io_ctx *)page_private(page); f2fs_put_dic(dic, in_task); } /* * check whether cluster blocks are contiguous, and add extent cache entry * only if cluster blocks are logically and physically contiguous. */ unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn, unsigned int ofs_in_node) { bool compressed = data_blkaddr(dn->inode, dn->node_page, ofs_in_node) == COMPRESS_ADDR; int i = compressed ? 
1 : 0; block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node + i); for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) { block_t blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) break; if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr) return 0; } return compressed ? i - 1 : i; } const struct address_space_operations f2fs_compress_aops = { .release_folio = f2fs_release_folio, .invalidate_folio = f2fs_invalidate_folio, .migrate_folio = filemap_migrate_folio, }; struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi) { return sbi->compress_inode->i_mapping; } void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr) { if (!sbi->compress_inode) return; invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr); } void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, block_t blkaddr) { struct page *cpage; int ret; if (!test_opt(sbi, COMPRESS_CACHE)) return; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ)) return; if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE)) return; cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr); if (cpage) { f2fs_put_page(cpage, 0); return; } cpage = alloc_page(__GFP_NOWARN | __GFP_IO); if (!cpage) return; ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi), blkaddr, GFP_NOFS); if (ret) { f2fs_put_page(cpage, 0); return; } set_page_private_data(cpage, ino); memcpy(page_address(cpage), page_address(page), PAGE_SIZE); SetPageUptodate(cpage); f2fs_put_page(cpage, 1); } bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, block_t blkaddr) { struct page *cpage; bool hitted = false; if (!test_opt(sbi, COMPRESS_CACHE)) return false; cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi), blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS); if (cpage) { if (PageUptodate(cpage)) { atomic_inc(&sbi->compress_page_hit); memcpy(page_address(page), page_address(cpage), PAGE_SIZE); hitted = true; } f2fs_put_page(cpage, 1); } return hitted; } void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) { struct address_space *mapping = COMPRESS_MAPPING(sbi); struct folio_batch fbatch; pgoff_t index = 0; pgoff_t end = MAX_BLKADDR(sbi); if (!mapping->nrpages) return; folio_batch_init(&fbatch); do { unsigned int nr, i; nr = filemap_get_folios(mapping, &index, end - 1, &fbatch); if (!nr) break; for (i = 0; i < nr; i++) { struct folio *folio = fbatch.folios[i]; folio_lock(folio); if (folio->mapping != mapping) { folio_unlock(folio); continue; } if (ino != get_page_private_data(&folio->page)) { folio_unlock(folio); continue; } generic_error_remove_folio(mapping, folio); folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); } while (index < end); } int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { struct inode *inode; if (!test_opt(sbi, COMPRESS_CACHE)) return 0; inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi)); if (IS_ERR(inode)) return PTR_ERR(inode); sbi->compress_inode = inode; sbi->compress_percent = COMPRESS_PERCENT; sbi->compress_watermark = COMPRESS_WATERMARK; atomic_set(&sbi->compress_page_hit, 0); return 0; } void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { if (!sbi->compress_inode) return; iput(sbi->compress_inode); sbi->compress_inode = NULL; } int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; char slab_name[35]; if (!f2fs_sb_has_compression(sbi)) return 0; sprintf(slab_name, 
"f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev)); sbi->page_array_slab_size = sizeof(struct page *) << F2FS_OPTION(sbi).compress_log_size; sbi->page_array_slab = f2fs_kmem_cache_create(slab_name, sbi->page_array_slab_size); return sbi->page_array_slab ? 0 : -ENOMEM; } void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { kmem_cache_destroy(sbi->page_array_slab); } int __init f2fs_init_compress_cache(void) { cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry", sizeof(struct compress_io_ctx)); if (!cic_entry_slab) return -ENOMEM; dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry", sizeof(struct decompress_io_ctx)); if (!dic_entry_slab) goto free_cic; return 0; free_cic: kmem_cache_destroy(cic_entry_slab); return -ENOMEM; } void f2fs_destroy_compress_cache(void) { kmem_cache_destroy(dic_entry_slab); kmem_cache_destroy(cic_entry_slab); }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Roccat Lua driver for Linux
 *
 * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
 */

/*
 */

/*
 * Roccat Lua is a gamer mouse whose cpi, button and light settings can be
 * configured.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hid-roccat.h>
#include "hid-ids.h"
#include "hid-roccat-common.h"
#include "hid-roccat-lua.h"

static ssize_t lua_sysfs_read(struct file *fp, struct kobject *kobj,
		char *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev = kobj_to_dev(kobj);
	struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off >= real_size)
		return 0;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&lua->lua_lock);
	retval = roccat_common2_receive(usb_dev, command, buf, real_size);
	mutex_unlock(&lua->lua_lock);

	return retval ? retval : real_size;
}

static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
		void const *buf, loff_t off, size_t count,
		size_t real_size, uint command)
{
	struct device *dev = kobj_to_dev(kobj);
	struct lua_device *lua = hid_get_drvdata(dev_get_drvdata(dev));
	struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
	int retval;

	if (off != 0 || count != real_size)
		return -EINVAL;

	mutex_lock(&lua->lua_lock);
	retval = roccat_common2_send(usb_dev, command, buf, real_size);
	mutex_unlock(&lua->lua_lock);

	return retval ? retval : real_size;
}

#define LUA_SYSFS_W(thingy, THINGY) \
static ssize_t lua_sysfs_write_ ## thingy(struct file *fp, \
		struct kobject *kobj, struct bin_attribute *attr, \
		char *buf, loff_t off, size_t count) \
{ \
	return lua_sysfs_write(fp, kobj, buf, off, count, \
			LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
}

#define LUA_SYSFS_R(thingy, THINGY) \
static ssize_t lua_sysfs_read_ ## thingy(struct file *fp, \
		struct kobject *kobj, struct bin_attribute *attr, \
		char *buf, loff_t off, size_t count) \
{ \
	return lua_sysfs_read(fp, kobj, buf, off, count, \
			LUA_SIZE_ ## THINGY, LUA_COMMAND_ ## THINGY); \
}

#define LUA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
LUA_SYSFS_W(thingy, THINGY) \
LUA_SYSFS_R(thingy, THINGY) \
static struct bin_attribute lua_ ## thingy ## _attr = { \
	.attr = { .name = #thingy, .mode = 0660 }, \
	.size = LUA_SIZE_ ## THINGY, \
	.read = lua_sysfs_read_ ## thingy, \
	.write = lua_sysfs_write_ ## thingy \
};

LUA_BIN_ATTRIBUTE_RW(control, CONTROL)

static int lua_create_sysfs_attributes(struct usb_interface *intf)
{
	return sysfs_create_bin_file(&intf->dev.kobj, &lua_control_attr);
}

static void lua_remove_sysfs_attributes(struct usb_interface *intf)
{
	sysfs_remove_bin_file(&intf->dev.kobj, &lua_control_attr);
}

static int lua_init_lua_device_struct(struct usb_device *usb_dev,
		struct lua_device *lua)
{
	mutex_init(&lua->lua_lock);

	return 0;
}

static int lua_init_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct usb_device *usb_dev = interface_to_usbdev(intf);
	struct lua_device *lua;
	int retval;

	lua = kzalloc(sizeof(*lua), GFP_KERNEL);
	if (!lua) {
		hid_err(hdev, "can't alloc device descriptor\n");
		return -ENOMEM;
	}
	hid_set_drvdata(hdev, lua);

	retval = lua_init_lua_device_struct(usb_dev, lua);
	if (retval) {
		hid_err(hdev, "couldn't init struct lua_device\n");
		goto exit;
	}

	retval = lua_create_sysfs_attributes(intf);
	if (retval) {
		hid_err(hdev, "cannot create sysfs files\n");
		goto exit;
	}

	return 0;
exit:
	kfree(lua);
	return retval;
}

static void lua_remove_specials(struct hid_device *hdev)
{
	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
	struct lua_device *lua;

	lua_remove_sysfs_attributes(intf);

	lua = hid_get_drvdata(hdev);
	kfree(lua);
}

static int lua_probe(struct hid_device *hdev,
		const struct hid_device_id *id)
{
	int retval;

	if (!hid_is_usb(hdev))
		return -EINVAL;

	retval = hid_parse(hdev);
	if (retval) {
		hid_err(hdev, "parse failed\n");
		goto exit;
	}

	retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (retval) {
		hid_err(hdev, "hw start failed\n");
		goto exit;
	}

	retval = lua_init_specials(hdev);
	if (retval) {
		hid_err(hdev, "couldn't install mouse\n");
		goto exit_stop;
	}

	return 0;

exit_stop:
	hid_hw_stop(hdev);
exit:
	return retval;
}

static void lua_remove(struct hid_device *hdev)
{
	lua_remove_specials(hdev);
	hid_hw_stop(hdev);
}

static const struct hid_device_id lua_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT,
			USB_DEVICE_ID_ROCCAT_LUA) },
	{ }
};

MODULE_DEVICE_TABLE(hid, lua_devices);

static struct hid_driver lua_driver = {
	.name = "lua",
	.id_table = lua_devices,
	.probe = lua_probe,
	.remove = lua_remove
};
module_hid_driver(lua_driver);

MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat Lua driver");
MODULE_LICENSE("GPL v2");
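/*
 * For reference, a sketch of what LUA_BIN_ATTRIBUTE_RW(control, CONTROL)
 * expands to, derived purely from the three macros above. LUA_SIZE_CONTROL
 * and LUA_COMMAND_CONTROL are assumed to come from hid-roccat-lua.h.
 */
static ssize_t lua_sysfs_write_control(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	return lua_sysfs_write(fp, kobj, buf, off, count,
			LUA_SIZE_CONTROL, LUA_COMMAND_CONTROL);
}

static ssize_t lua_sysfs_read_control(struct file *fp,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t off, size_t count)
{
	return lua_sysfs_read(fp, kobj, buf, off, count,
			LUA_SIZE_CONTROL, LUA_COMMAND_CONTROL);
}

static struct bin_attribute lua_control_attr = {
	.attr = { .name = "control", .mode = 0660 },
	.size = LUA_SIZE_CONTROL,
	.read = lua_sysfs_read_control,
	.write = lua_sysfs_write_control
};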
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/uidgid.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>
#include <net/sock.h>

static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	/* might or might not have a hash table lock */
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (!addr)
		return 0;

	return nla_put(nlskb, UNIX_DIAG_NAME,
		       addr->len - offsetof(struct sockaddr_un, sun_path),
		       addr->name->sun_path);
}

static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;

	if (dentry) {
		struct unix_diag_vfs uv = {
			.udiag_vfs_ino = d_backing_inode(dentry)->i_ino,
			.udiag_vfs_dev = dentry->d_sb->s_dev,
		};

		return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
	}

	return 0;
}

static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		ino = sock_i_ino(peer);
		sock_put(peer);

		return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
}

static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	struct nlattr *attr;
	u32 *buf;
	int i;

	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);

		attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
				   sk->sk_receive_queue.qlen * sizeof(u32));
		if (!attr)
			goto errout;

		buf = nla_data(attr);
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb)
			buf[i++] = sock_i_ino(unix_peer(skb->sk));

		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

errout:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen rql;

	if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
		rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
		rql.udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql.udiag_rqueue = (u32) unix_inq_len(sk);
		rql.udiag_wqueue = (u32) unix_outq_len(sk);
	}

	return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
			    struct user_namespace *user_ns)
{
	uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));

	return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct unix_diag_req *req,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = READ_ONCE(sk->sk_state);
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_UID) &&
	    sk_diag_dump_uid(sk, skb, user_ns))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int num, s_num, slot, s_slot;
	struct unix_diag_req *req;

	req = nlmsg_data(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	for (slot = s_slot; slot < UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;

		num = 0;
		spin_lock(&net->unx.table.locks[slot]);
		sk_for_each(sk, &net->unx.table.buckets[slot]) {
			int sk_ino;

			if (num < s_num)
				goto next;

			if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
				goto next;

			sk_ino = sock_i_ino(sk);
			if (!sk_ino)
				goto next;

			if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk),
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI, sk_ino) < 0) {
				spin_unlock(&net->unx.table.locks[slot]);
				goto done;
			}
next:
			num++;
		}
		spin_unlock(&net->unx.table.locks[slot]);
	}
done:
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

static struct sock *unix_lookup_by_ino(struct net *net, unsigned int ino)
{
	struct sock *sk;
	int i;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock(&net->unx.table.locks[i]);
		sk_for_each(sk, &net->unx.table.buckets[i]) {
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&net->unx.table.locks[i]);
				return sk;
			}
		}
		spin_unlock(&net->unx.table.locks[i]);
	}
	return NULL;
}

static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	struct net *net = sock_net(in_skb->sk);
	unsigned int extra_len;
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(net, req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		nlmsg_free(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);

out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_net(skb->sk)->diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler unix_diag_handler = {
	.owner = THIS_MODULE,
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UNIX socket monitoring via SOCK_DIAG");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
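/*
 * A minimal userspace sketch (not part of this module) of the request that
 * unix_diag_handler_dump() above services: a SOCK_DIAG_BY_FAMILY dump for
 * AF_UNIX sockets sent over a NETLINK_SOCK_DIAG socket. Reply parsing of the
 * NLM_F_MULTI messages built by sk_diag_fill() is omitted.
 */
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int unix_diag_request_dump(void)
{
	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	/* NLM_F_DUMP makes unix_diag_handler_dump() start unix_diag_dump() */
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_UNIX;
	msg.req.udiag_states = ~0U;	/* all socket states */
	msg.req.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
			     UDIAG_SHOW_RQLEN;

	if (sendto(fd, &msg, sizeof(msg), 0,
		   (struct sockaddr *)&nladdr, sizeof(nladdr)) < 0) {
		close(fd);
		return -1;
	}

	/* The caller would read the NLM_F_MULTI replies from fd here. */
	return fd;
}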
/*
 * Copyright (c) 2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
#include <linux/vmalloc.h>

#include "core.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"
#include "hif-ops.h"
#include "htc-ops.h"

static const struct ath6kl_hw hw_list[] = {
	{
		.id = AR6003_HW_2_0_VERSION,
		.name = "ar6003 hw 2.0",
		.dataset_patch_addr = 0x57e884,
		.app_load_addr = 0x543180,
		.board_ext_data_addr = 0x57e500,
		.reserved_ram_size = 6912,
		.refclk_hz = 26000000,
		.uarttx_pin = 8,
		.flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,

		/* hw2.0 needs override address hardcoded */
		.app_start_override_addr = 0x944C00,

		.fw = {
			.dir = AR6003_HW_2_0_FW_DIR,
			.otp = AR6003_HW_2_0_OTP_FILE,
			.fw = AR6003_HW_2_0_FIRMWARE_FILE,
			.tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
			.patch = AR6003_HW_2_0_PATCH_FILE,
		},

		.fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
		.fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6003_HW_2_1_1_VERSION,
		.name = "ar6003 hw 2.1.1",
		.dataset_patch_addr = 0x57ff74,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0x542330,
		.reserved_ram_size = 512,
		.refclk_hz = 26000000,
		.uarttx_pin = 8,
		.testscript_addr = 0x57ef74,
		.flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,

		.fw = {
			.dir = AR6003_HW_2_1_1_FW_DIR,
			.otp = AR6003_HW_2_1_1_OTP_FILE,
			.fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
			.tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
			.patch = AR6003_HW_2_1_1_PATCH_FILE,
			.utf = AR6003_HW_2_1_1_UTF_FIRMWARE_FILE,
			.testscript = AR6003_HW_2_1_1_TESTSCRIPT_FILE,
		},

		.fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
		.fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6004_HW_1_0_VERSION,
		.name = "ar6004 hw 1.0",
		.dataset_patch_addr = 0x57e884,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0x437000,
		.reserved_ram_size = 19456,
		.board_addr = 0x433900,
		.refclk_hz = 26000000,
		.uarttx_pin = 11,
		.flags = 0,

		.fw = {
			.dir = AR6004_HW_1_0_FW_DIR,
			.fw = AR6004_HW_1_0_FIRMWARE_FILE,
		},

		.fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
		.fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6004_HW_1_1_VERSION,
		.name = "ar6004 hw 1.1",
		.dataset_patch_addr = 0x57e884,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0x437000,
		.reserved_ram_size = 11264,
		.board_addr = 0x43d400,
		.refclk_hz = 40000000,
		.uarttx_pin = 11,
		.flags = 0,

		.fw = {
			.dir = AR6004_HW_1_1_FW_DIR,
			.fw = AR6004_HW_1_1_FIRMWARE_FILE,
		},

		.fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
		.fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6004_HW_1_2_VERSION,
		.name = "ar6004 hw 1.2",
		.dataset_patch_addr = 0x436ecc,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0x437000,
		.reserved_ram_size = 9216,
		.board_addr = 0x435c00,
		.refclk_hz = 40000000,
		.uarttx_pin = 11,
		.flags = 0,

		.fw = {
			.dir = AR6004_HW_1_2_FW_DIR,
			.fw = AR6004_HW_1_2_FIRMWARE_FILE,
		},

		.fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
		.fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6004_HW_1_3_VERSION,
		.name = "ar6004 hw 1.3",
		.dataset_patch_addr = 0x437860,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0x437000,
		.reserved_ram_size = 7168,
		.board_addr = 0x436400,
		.refclk_hz = 0,
		.uarttx_pin = 11,
		.flags = 0,

		.fw = {
			.dir = AR6004_HW_1_3_FW_DIR,
			.fw = AR6004_HW_1_3_FIRMWARE_FILE,
			.tcmd = AR6004_HW_1_3_TCMD_FIRMWARE_FILE,
			.utf = AR6004_HW_1_3_UTF_FIRMWARE_FILE,
			.testscript = AR6004_HW_1_3_TESTSCRIPT_FILE,
		},

		.fw_board = AR6004_HW_1_3_BOARD_DATA_FILE,
		.fw_default_board = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
	},
	{
		.id = AR6004_HW_3_0_VERSION,
		.name = "ar6004 hw 3.0",
		.dataset_patch_addr = 0,
		.app_load_addr = 0x1234,
		.board_ext_data_addr = 0,
		.reserved_ram_size = 7168,
		.board_addr = 0x436400,
		.testscript_addr = 0,
		.uarttx_pin = 11,
		.flags = 0,

		.fw = {
			.dir = AR6004_HW_3_0_FW_DIR,
			.fw = AR6004_HW_3_0_FIRMWARE_FILE,
			.tcmd = AR6004_HW_3_0_TCMD_FIRMWARE_FILE,
			.utf = AR6004_HW_3_0_UTF_FIRMWARE_FILE,
			.testscript = AR6004_HW_3_0_TESTSCRIPT_FILE,
		},

		.fw_board = AR6004_HW_3_0_BOARD_DATA_FILE,
		.fw_default_board = AR6004_HW_3_0_DEFAULT_BOARD_DATA_FILE,
	},
};

/*
 * Include definitions here that can be used to tune the WLAN module
 * behavior. Different customers can tune the behavior to their needs
 * here.
 */

/*
 * This configuration item enables/disables keepalive support.
 * Keepalive support: In the absence of any data traffic to the AP, null
 * frames will be sent to the AP at a periodic interval, to keep the
 * association active. This configuration item defines the periodic interval.
 * Use a value of zero to disable keepalive support.
 * Default: 60 seconds
 */
#define WLAN_CONFIG_KEEP_ALIVE_INTERVAL 60

/*
 * This configuration item sets the value of the disconnect timeout.
 * Firmware delays sending the disconnect event to the host for this
 * timeout after it gets disconnected from the current AP.
 * If the firmware successfully roams within the disconnect timeout
 * it sends a new connect event.
 */
#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10

#define ATH6KL_DATA_OFFSET    64

struct sk_buff *ath6kl_buf_alloc(int size)
{
	struct sk_buff *skb;
	u16 reserved;

	/* Add cacheline space at front and back of buffer */
	reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
			   sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
	skb = dev_alloc_skb(size + reserved);

	if (skb)
		skb_reserve(skb, reserved - L1_CACHE_BYTES);
	return skb;
}

void ath6kl_init_profile_info(struct ath6kl_vif *vif)
{
	vif->ssid_len = 0;
	memset(vif->ssid, 0, sizeof(vif->ssid));

	vif->dot11_auth_mode = OPEN_AUTH;
	vif->auth_mode = NONE_AUTH;
	vif->prwise_crypto = NONE_CRYPT;
	vif->prwise_crypto_len = 0;
	vif->grp_crypto = NONE_CRYPT;
	vif->grp_crypto_len = 0;
	memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
	memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
	memset(vif->bssid, 0, sizeof(vif->bssid));
	vif->bss_ch = 0;
}

static int ath6kl_set_host_app_area(struct ath6kl *ar)
{
	u32 address, data;
	struct host_app_area host_app_area;

	/* Fetch the address of the host_app_area_s
	 * instance in the host interest area */
	address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_app_host_interest));
	address = TARG_VTOP(ar->target_type, address);

	if (ath6kl_diag_read32(ar, address, &data))
		return -EIO;

	address = TARG_VTOP(ar->target_type, data);
	host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
	if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
			      sizeof(struct host_app_area)))
		return -EIO;

	return 0;
}

static inline void set_ac2_ep_map(struct ath6kl *ar,
				  u8 ac,
				  enum htc_endpoint_id ep)
{
	ar->ac2ep_map[ac] = ep;
	ar->ep2ac_map[ep] = ac;
}

/* connect to a service */
static int ath6kl_connectservice(struct ath6kl *ar,
				 struct htc_service_connect_req *con_req,
				 char *desc)
{
	int status;
	struct htc_service_connect_resp response;

	memset(&response, 0, sizeof(response));

	status = ath6kl_htc_conn_service(ar->htc_target, con_req, &response);
	if (status) {
		ath6kl_err("failed to connect to %s service status:%d\n",
			   desc, status);
		return status;
	}

	switch (con_req->svc_id) {
	case WMI_CONTROL_SVC:
		if (test_bit(WMI_ENABLED, &ar->flag))
			ath6kl_wmi_set_control_ep(ar->wmi, response.endpoint);
		ar->ctrl_ep = response.endpoint;
		break;
	case WMI_DATA_BE_SVC:
		set_ac2_ep_map(ar, WMM_AC_BE, response.endpoint);
		break;
	case WMI_DATA_BK_SVC:
		set_ac2_ep_map(ar, WMM_AC_BK, response.endpoint);
		break;
	case WMI_DATA_VI_SVC:
		set_ac2_ep_map(ar, WMM_AC_VI, response.endpoint);
		break;
	case WMI_DATA_VO_SVC:
		set_ac2_ep_map(ar, WMM_AC_VO, response.endpoint);
		break;
	default:
		ath6kl_err("service id is not mapped %d\n", con_req->svc_id);
		return -EINVAL;
	}

	return 0;
}

static int ath6kl_init_service_ep(struct ath6kl *ar)
{
	struct htc_service_connect_req connect;

	memset(&connect, 0, sizeof(connect));

	/* these fields are the same for all service endpoints */
	connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
	connect.ep_cb.rx = ath6kl_rx;
	connect.ep_cb.rx_refill = ath6kl_rx_refill;
	connect.ep_cb.tx_full = ath6kl_tx_queue_full;

	/*
	 * Set the max queue depth so that our ath6kl_tx_queue_full handler
	 * gets called.
	 */
	connect.max_txq_depth = MAX_DEFAULT_SEND_QUEUE_DEPTH;
	connect.ep_cb.rx_refill_thresh = ATH6KL_MAX_RX_BUFFERS / 4;
	if (!connect.ep_cb.rx_refill_thresh)
		connect.ep_cb.rx_refill_thresh++;

	/* connect to control service */
	connect.svc_id = WMI_CONTROL_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI CONTROL"))
		return -EIO;

	connect.flags |= HTC_FLGS_TX_BNDL_PAD_EN;

	/*
	 * Limit the HTC message size on the send path: although we can
	 * receive A-MSDU frames of 4K, we will only send ethernet-sized
	 * (802.3) frames on the send path.
	 */
	connect.max_rxmsg_sz = WMI_MAX_TX_DATA_FRAME_LENGTH;

	/*
	 * To reduce the amount of committed memory for larger A_MSDU
	 * frames, use the recv-alloc threshold mechanism for larger
	 * packets.
	 */
	connect.ep_cb.rx_alloc_thresh = ATH6KL_BUFFER_SIZE;
	connect.ep_cb.rx_allocthresh = ath6kl_alloc_amsdu_rxbuf;

	/*
	 * For the remaining data services set the connection flag to
	 * reduce dribbling, if configured to do so.
	 */
	connect.conn_flags |= HTC_CONN_FLGS_REDUCE_CRED_DRIB;
	connect.conn_flags &= ~HTC_CONN_FLGS_THRESH_MASK;
	connect.conn_flags |= HTC_CONN_FLGS_THRESH_LVL_HALF;

	connect.svc_id = WMI_DATA_BE_SVC;

	if (ath6kl_connectservice(ar, &connect, "WMI DATA BE"))
		return -EIO;

	/* connect to background, map this to WMI LOW_PRI */
	connect.svc_id = WMI_DATA_BK_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
		return -EIO;

	/* connect to Video service, map this to HI PRI */
	connect.svc_id = WMI_DATA_VI_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
		return -EIO;

	/*
	 * Connect to VO service, this is currently not mapped to a WMI
	 * priority stream due to historical reasons. WMI originally
	 * defined 3 priorities over 3 mailboxes. We can change this when
	 * WMI is reworked so that priorities are not dependent on
	 * mailboxes.
	 */
	connect.svc_id = WMI_DATA_VO_SVC;
	if (ath6kl_connectservice(ar, &connect, "WMI DATA VO"))
		return -EIO;

	return 0;
}

void ath6kl_init_control_info(struct ath6kl_vif *vif)
{
	ath6kl_init_profile_info(vif);
	vif->def_txkey_index = 0;
	memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
	vif->ch_hint = 0;
}
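/*
 * Summary of the endpoint setup above: ath6kl_init_service_ep() connects
 * one HTC endpoint per WMI service (control plus the BE/BK/VI/VO data
 * services), and ath6kl_connectservice() records each data endpoint via
 * set_ac2_ep_map() in ar->ac2ep_map/ar->ep2ac_map, giving a two-way mapping
 * between WMM access categories and HTC endpoints.
 */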
/*
 * Set HTC/Mbox operational parameters; these can only be set while the
 * target is in the BMI phase.
 */
static int ath6kl_set_htc_params(struct ath6kl *ar, u32 mbox_isr_yield_val,
				 u8 htc_ctrl_buf)
{
	int status;
	u32 blk_size;

	blk_size = ar->mbox_info.block_size;

	if (htc_ctrl_buf)
		blk_size |= ((u32)htc_ctrl_buf) << 16;

	/* set the host interest area for the block size */
	status = ath6kl_bmi_write_hi32(ar, hi_mbox_io_block_sz, blk_size);
	if (status) {
		ath6kl_err("bmi_write_memory for IO block size failed\n");
		goto out;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "block size set: %d (target addr:0x%X)\n",
		   blk_size,
		   ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_mbox_io_block_sz)));

	if (mbox_isr_yield_val) {
		/* set the host interest area for the mbox ISR yield limit */
		status = ath6kl_bmi_write_hi32(ar, hi_mbox_isr_yield_limit,
					       mbox_isr_yield_val);
		if (status) {
			ath6kl_err("bmi_write_memory for yield limit failed\n");
			goto out;
		}
	}

out:
	return status;
}

static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
	int ret;

	/*
	 * Configure the device for rx dot11 header rules. "0,0" are the
	 * default values. Required if checksum offload is needed. Set
	 * RxMetaVersion to 2.
	 */
	ret = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
						 ar->rx_meta_ver, 0, 0);
	if (ret) {
		ath6kl_err("unable to set the rx frame format: %d\n", ret);
		return ret;
	}

	if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN) {
		ret = ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
					      IGNORE_PS_FAIL_DURING_SCAN);
		if (ret) {
			ath6kl_err("unable to set power save fail event policy: %d\n",
				   ret);
			return ret;
		}
	}

	if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER)) {
		ret = ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
						   WMI_FOLLOW_BARKER_IN_ERP);
		if (ret) {
			ath6kl_err("unable to set barker preamble policy: %d\n",
				   ret);
			return ret;
		}
	}

	ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
					   WLAN_CONFIG_KEEP_ALIVE_INTERVAL);
	if (ret) {
		ath6kl_err("unable to set keep alive interval: %d\n", ret);
		return ret;
	}

	ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
					 WLAN_CONFIG_DISCONNECT_TIMEOUT);
	if (ret) {
		ath6kl_err("unable to set disconnect timeout: %d\n", ret);
		return ret;
	}

	if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST)) {
		ret = ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED);
		if (ret) {
			ath6kl_err("unable to set txop bursting: %d\n", ret);
			return ret;
		}
	}

	if (ar->p2p && (ar->vif_max == 1 || idx)) {
		ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
					      P2P_FLAG_CAPABILITIES_REQ |
					      P2P_FLAG_MACADDR_REQ |
					      P2P_FLAG_HMODEL_REQ);
		if (ret) {
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "failed to request P2P capabilities (%d) - assuming P2P not supported\n",
				   ret);
			ar->p2p = false;
		}
	}

	if (ar->p2p && (ar->vif_max == 1 || idx)) {
		/* Enable Probe Request reporting for P2P */
		ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
		if (ret) {
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "failed to enable Probe Request reporting (%d)\n",
				   ret);
		}
	}

	return ret;
}

int ath6kl_configure_target(struct ath6kl *ar)
{
	u32 param, ram_reserved_size;
	u8 fw_iftype, fw_mode = 0, fw_submode = 0;
	int i, status;

	param = !!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG);

	if (ath6kl_bmi_write_hi32(ar, hi_serial_enable, param)) {
		ath6kl_err("bmi_write_memory for uart debug failed\n");
		return -EIO;
	}

	/*
	 * Note: Even though the firmware interface type is chosen as
	 * BSS_STA for all three interfaces, it can be configured as
	 * IBSS/AP as long as the fw submode remains normal mode
	 * (0 - AP, STA and IBSS). But due to a target assert in firmware
	 * only one interface is configured for now.
	 */
	fw_iftype = HI_OPTION_FW_MODE_BSS_STA;

	for (i = 0; i < ar->vif_max; i++)
		fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);

	/*
	 * Submodes when fw does not support dynamic interface
	 * switching:
	 *		vif[0] - AP/STA/IBSS
	 *		vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
	 *		vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
	 * Otherwise, all the interfaces are initialized as P2P dev.
	 */

	if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
		     ar->fw_capabilities)) {
		for (i = 0; i < ar->vif_max; i++)
			fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
				      (i * HI_OPTION_FW_SUBMODE_BITS);
	} else {
		for (i = 0; i < ar->max_norm_iface; i++)
			fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
				      (i * HI_OPTION_FW_SUBMODE_BITS);

		for (i = ar->max_norm_iface; i < ar->vif_max; i++)
			fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
				      (i * HI_OPTION_FW_SUBMODE_BITS);

		if (ar->p2p && ar->vif_max == 1)
			fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
	}

	if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
				  HTC_PROTOCOL_VERSION) != 0) {
		ath6kl_err("bmi_write_memory for htc version failed\n");
		return -EIO;
	}

	/* set the firmware mode to STA/IBSS/AP */
	param = 0;

	if (ath6kl_bmi_read_hi32(ar, hi_option_flag, &param) != 0) {
		ath6kl_err("bmi_read_memory for setting fwmode failed\n");
		return -EIO;
	}

	param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
	param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
	param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;

	param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
	param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);

	if (ath6kl_bmi_write_hi32(ar, hi_option_flag, param) != 0) {
		ath6kl_err("bmi_write_memory for setting fwmode failed\n");
		return -EIO;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "firmware mode set\n");

	/*
	 * Hardcode the address used for the extended board data.
	 * Ideally this should be pre-allocated by the OS at boot time,
	 * but since it is a new feature and board data is loaded
	 * at init time, we have to work around this from the host.
	 * It is difficult to patch the firmware boot code,
	 * but possible in theory.
	 */

	if ((ar->target_type == TARGET_TYPE_AR6003) ||
	    (ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
	    (ar->version.target_ver == AR6004_HW_3_0_VERSION)) {
		param = ar->hw.board_ext_data_addr;
		ram_reserved_size = ar->hw.reserved_ram_size;

		if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
			ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
			return -EIO;
		}

		if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
					  ram_reserved_size) != 0) {
			ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
			return -EIO;
		}
	}

	/* set the block size for the target */
	if (ath6kl_set_htc_params(ar, MBOX_YIELD_LIMIT, 0))
		/* use default number of control buffers */
		return -EIO;

	/* Configure GPIO AR600x UART */
	status = ath6kl_bmi_write_hi32(ar, hi_dbg_uart_txpin,
				       ar->hw.uarttx_pin);
	if (status)
		return status;

	/* Only set the baud rate if we're actually doing debug */
	if (ar->conf_flags & ATH6KL_CONF_UART_DEBUG) {
		status = ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate,
					       ar->hw.uarttx_rate);
		if (status)
			return status;
	}

	/* Configure target refclk_hz */
	if (ar->hw.refclk_hz != 0) {
		status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,
					       ar->hw.refclk_hz);
		if (status)
			return status;
	}

	return 0;
}

/* firmware upload */
static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
			 u8 **fw, size_t *fw_len)
{
	const struct firmware *fw_entry;
	int ret;

	ret = request_firmware(&fw_entry, filename, ar->dev);
	if (ret)
		return ret;

	*fw_len = fw_entry->size;
	*fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);

	if (*fw == NULL)
		ret = -ENOMEM;

	release_firmware(fw_entry);

	return ret;
}

#ifdef CONFIG_OF
/*
 * Check the device tree for a board-id and use it to construct
 * the pathname to the firmware file. Used (for now) to find a
 * fallback to the "bdata.bin" file--typically a symlink to the
 * appropriate board-specific file.
 */
static bool check_device_tree(struct ath6kl *ar)
{
	static const char *board_id_prop = "atheros,board-id";
	struct device_node *node;
	char board_filename[64];
	const char *board_id;
	int ret;

	for_each_compatible_node(node, NULL, "atheros,ath6kl") {
		board_id = of_get_property(node, board_id_prop, NULL);
		if (board_id == NULL) {
			ath6kl_warn("No \"%s\" property on %pOFn node.\n",
				    board_id_prop, node);
			continue;
		}
		snprintf(board_filename, sizeof(board_filename),
			 "%s/bdata.%s.bin", ar->hw.fw.dir, board_id);

		ret = ath6kl_get_fw(ar, board_filename, &ar->fw_board,
				    &ar->fw_board_len);
		if (ret) {
			ath6kl_err("Failed to get DT board file %s: %d\n",
				   board_filename, ret);
			continue;
		}

		of_node_put(node);
		return true;
	}

	return false;
}
#else
static bool check_device_tree(struct ath6kl *ar)
{
	return false;
}
#endif /* CONFIG_OF */

static int ath6kl_fetch_board_file(struct ath6kl *ar)
{
	const char *filename;
	int ret;

	if (ar->fw_board != NULL)
		return 0;

	if (WARN_ON(ar->hw.fw_board == NULL))
		return -EINVAL;

	filename = ar->hw.fw_board;

	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
			    &ar->fw_board_len);
	if (ret == 0) {
		/* managed to get proper board file */
		return 0;
	}

	if (check_device_tree(ar)) {
		/* got board file from device tree */
		return 0;
	}

	/* there was no proper board file, try to use default instead */
	ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
		    filename, ret);

	filename = ar->hw.fw_default_board;

	ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
			    &ar->fw_board_len);
	if (ret) {
		ath6kl_err("Failed to get default board file %s: %d\n",
			   filename, ret);
		return ret;
	}
	ath6kl_warn("WARNING! No proper board file found, instead using a default board file.\n");
	ath6kl_warn("Most likely your hardware won't work as specified. Install correct board file!\n");

	return 0;
}

static int ath6kl_fetch_otp_file(struct ath6kl *ar)
{
	char filename[100];
	int ret;

	if (ar->fw_otp != NULL)
		return 0;

	if (ar->hw.fw.otp == NULL) {
		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "no OTP file configured for this hw\n");
		return 0;
	}

	snprintf(filename, sizeof(filename), "%s/%s",
		 ar->hw.fw.dir, ar->hw.fw.otp);

	ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
			    &ar->fw_otp_len);
	if (ret) {
		ath6kl_err("Failed to get OTP file %s: %d\n",
			   filename, ret);
		return ret;
	}

	return 0;
}

static int ath6kl_fetch_testmode_file(struct ath6kl *ar)
{
	char filename[100];
	int ret;

	if (ar->testmode == 0)
		return 0;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "testmode %d\n", ar->testmode);

	if (ar->testmode == 2) {
		if (ar->hw.fw.utf == NULL) {
			ath6kl_warn("testmode 2 not supported\n");
			return -EOPNOTSUPP;
		}

		snprintf(filename, sizeof(filename), "%s/%s",
			 ar->hw.fw.dir, ar->hw.fw.utf);
	} else {
		if (ar->hw.fw.tcmd == NULL) {
			ath6kl_warn("testmode 1 not supported\n");
			return -EOPNOTSUPP;
		}

		snprintf(filename, sizeof(filename), "%s/%s",
			 ar->hw.fw.dir, ar->hw.fw.tcmd);
	}

	set_bit(TESTMODE, &ar->flag);

	ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
	if (ret) {
		ath6kl_err("Failed to get testmode %d firmware file %s: %d\n",
			   ar->testmode, filename, ret);
		return ret;
	}

	return 0;
}

static int ath6kl_fetch_fw_file(struct ath6kl *ar)
{
	char filename[100];
	int ret;

	if (ar->fw != NULL)
		return 0;

	/* FIXME: remove WARN_ON() as we won't support FW API 1 for long */
	if (WARN_ON(ar->hw.fw.fw == NULL))
		return -EINVAL;

	snprintf(filename, sizeof(filename), "%s/%s",
		 ar->hw.fw.dir, ar->hw.fw.fw);

	ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
	if (ret) {
		ath6kl_err("Failed to get firmware file %s: %d\n",
			   filename, ret);
		return ret;
	}

	return 0;
}

static int ath6kl_fetch_patch_file(struct ath6kl *ar)
{
	char filename[100];
	int ret;

	if (ar->fw_patch != NULL)
		return 0;

	if (ar->hw.fw.patch == NULL)
		return 0;

	snprintf(filename, sizeof(filename), "%s/%s",
		 ar->hw.fw.dir, ar->hw.fw.patch);

	ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
			    &ar->fw_patch_len);
	if (ret) {
		ath6kl_err("Failed to get patch file %s: %d\n",
			   filename, ret);
		return ret;
	}

	return 0;
}

static int ath6kl_fetch_testscript_file(struct ath6kl *ar)
{
	char filename[100];
	int ret;

	if (ar->testmode != 2)
		return 0;

	if (ar->fw_testscript != NULL)
		return 0;

	if (ar->hw.fw.testscript == NULL)
		return 0;

	snprintf(filename, sizeof(filename), "%s/%s",
		 ar->hw.fw.dir, ar->hw.fw.testscript);

	ret = ath6kl_get_fw(ar, filename, &ar->fw_testscript,
			    &ar->fw_testscript_len);
	if (ret) {
		ath6kl_err("Failed to get testscript file %s: %d\n",
			   filename, ret);
		return ret;
	}

	return 0;
}

static int ath6kl_fetch_fw_api1(struct ath6kl *ar)
{
	int ret;

	ret = ath6kl_fetch_otp_file(ar);
	if (ret)
		return ret;

	ret = ath6kl_fetch_fw_file(ar);
	if (ret)
		return ret;

	ret = ath6kl_fetch_patch_file(ar);
	if (ret)
		return ret;

	ret = ath6kl_fetch_testscript_file(ar);
	if (ret)
		return ret;

	return 0;
}

static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
{
	size_t magic_len, len, ie_len;
	const struct firmware *fw;
	struct ath6kl_fw_ie *hdr;
	char filename[100];
	const u8 *data;
	int ret, ie_id, i, index, bit;
	__le32 *val;

	snprintf(filename, sizeof(filename), "%s/%s", ar->hw.fw.dir, name);

	ret = request_firmware(&fw, filename, ar->dev);
	if (ret) {
		ath6kl_err("Failed to request firmware, rv: %d\n", ret);
		return ret;
	}

	data = fw->data;
	len = fw->size;
	/* magic also includes the null byte, check that as well */
	magic_len = strlen(ATH6KL_FIRMWARE_MAGIC) + 1;

	if (len < magic_len) {
		ath6kl_err("Magic length is invalid, len: %zd magic_len: %zd\n",
			   len, magic_len);
		ret = -EINVAL;
		goto out;
	}

	if (memcmp(data, ATH6KL_FIRMWARE_MAGIC, magic_len) != 0) {
		ath6kl_err("Magic is invalid, magic_len: %zd\n", magic_len);
		ret = -EINVAL;
		goto out;
	}

	len -= magic_len;
	data += magic_len;

	/* loop elements */
	while (len > sizeof(struct ath6kl_fw_ie)) {
		/* hdr is unaligned! */
		hdr = (struct ath6kl_fw_ie *) data;

		ie_id = le32_to_cpup(&hdr->id);
		ie_len = le32_to_cpup(&hdr->len);

		len -= sizeof(*hdr);
		data += sizeof(*hdr);

		ath6kl_dbg(ATH6KL_DBG_BOOT, "ie-id: %d len: %zd (0x%zx)\n",
			   ie_id, ie_len, ie_len);

		if (len < ie_len) {
			ath6kl_err("IE len is invalid, len: %zd ie_len: %zd ie-id: %d\n",
				   len, ie_len, ie_id);
			ret = -EINVAL;
			goto out;
		}

		switch (ie_id) {
		case ATH6KL_FW_IE_FW_VERSION:
			strscpy(ar->wiphy->fw_version, data,
				min(sizeof(ar->wiphy->fw_version), ie_len+1));

			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found fw version %s\n",
				   ar->wiphy->fw_version);
			break;
		case ATH6KL_FW_IE_OTP_IMAGE:
			ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
				   ie_len);

			ar->fw_otp = kmemdup(data, ie_len, GFP_KERNEL);

			if (ar->fw_otp == NULL) {
				ath6kl_err("fw_otp cannot be allocated\n");
				ret = -ENOMEM;
				goto out;
			}

			ar->fw_otp_len = ie_len;
			break;
		case ATH6KL_FW_IE_FW_IMAGE:
			ath6kl_dbg(ATH6KL_DBG_BOOT, "found fw image ie (%zd B)\n",
				   ie_len);

			/* in testmode we already might have a fw file */
			if (ar->fw != NULL)
				break;

			ar->fw = vmalloc(ie_len);

			if (ar->fw == NULL) {
				ath6kl_err("fw storage cannot be allocated, len: %zd\n",
					   ie_len);
				ret = -ENOMEM;
				goto out;
			}

			memcpy(ar->fw, data, ie_len);
			ar->fw_len = ie_len;
			break;
		case ATH6KL_FW_IE_PATCH_IMAGE:
			ath6kl_dbg(ATH6KL_DBG_BOOT, "found patch image ie (%zd B)\n",
				   ie_len);

			ar->fw_patch = kmemdup(data, ie_len, GFP_KERNEL);

			if (ar->fw_patch == NULL) {
				ath6kl_err("fw_patch storage cannot be allocated, len: %zd\n",
					   ie_len);
				ret = -ENOMEM;
				goto out;
			}

			ar->fw_patch_len = ie_len;
			break;
		case ATH6KL_FW_IE_RESERVED_RAM_SIZE:
			val = (__le32 *) data;
			ar->hw.reserved_ram_size = le32_to_cpup(val);

			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found reserved ram size ie %d\n",
				   ar->hw.reserved_ram_size);
			break;
		case ATH6KL_FW_IE_CAPABILITIES:
			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found firmware capabilities ie (%zd B)\n",
				   ie_len);

			for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
				index = i / 8;
				bit = i % 8;

				if (index == ie_len)
					break;

				if (data[index] & (1 << bit))
					__set_bit(i, ar->fw_capabilities);
			}

			ath6kl_dbg_dump(ATH6KL_DBG_BOOT, "capabilities", "",
					ar->fw_capabilities,
					sizeof(ar->fw_capabilities));
			break;
		case ATH6KL_FW_IE_PATCH_ADDR:
			if (ie_len != sizeof(*val))
				break;

			val = (__le32 *) data;
			ar->hw.dataset_patch_addr = le32_to_cpup(val);

			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found patch address ie 0x%x\n",
				   ar->hw.dataset_patch_addr);
			break;
		case ATH6KL_FW_IE_BOARD_ADDR:
			if (ie_len != sizeof(*val))
				break;

			val = (__le32 *) data;
			ar->hw.board_addr = le32_to_cpup(val);

			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found board address ie 0x%x\n",
				   ar->hw.board_addr);
			break;
		case ATH6KL_FW_IE_VIF_MAX:
			if (ie_len != sizeof(*val))
				break;

			val = (__le32 *) data;
			ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
					    ATH6KL_VIF_MAX);

			if (ar->vif_max > 1 && !ar->p2p)
				ar->max_norm_iface = 2;

			ath6kl_dbg(ATH6KL_DBG_BOOT,
				   "found vif max ie %d\n", ar->vif_max);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
				   le32_to_cpup(&hdr->id));
			break;
		}

		len -= ie_len;
		data += ie_len;
	}

	ret = 0;
out:
	release_firmware(fw);

	return ret;
}
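/*
 * Firmware selection order: ath6kl_init_fetch_firmwares() below first tries
 * the self-describing API container images (ATH6KL_FW_API5_FILE down to
 * ATH6KL_FW_API2_FILE) via ath6kl_fetch_fw_apin(), which parses the IE list
 * handled above; only if none of those is found does it fall back to the
 * legacy API 1 layout with separate otp/fw/patch/testscript files fetched by
 * ath6kl_fetch_fw_api1().
 */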
int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
{
	int ret;

	ret = ath6kl_fetch_board_file(ar);
	if (ret)
		return ret;

	ret = ath6kl_fetch_testmode_file(ar);
	if (ret)
		return ret;

	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API5_FILE);
	if (ret == 0) {
		ar->fw_api = 5;
		goto out;
	}

	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
	if (ret == 0) {
		ar->fw_api = 4;
		goto out;
	}

	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
	if (ret == 0) {
		ar->fw_api = 3;
		goto out;
	}

	ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API2_FILE);
	if (ret == 0) {
		ar->fw_api = 2;
		goto out;
	}

	ret = ath6kl_fetch_fw_api1(ar);
	if (ret)
		return ret;

	ar->fw_api = 1;

out:
	ath6kl_dbg(ATH6KL_DBG_BOOT, "using fw api %d\n", ar->fw_api);

	return 0;
}

static int ath6kl_upload_board_file(struct ath6kl *ar)
{
	u32 board_address, board_ext_address, param;
	u32 board_data_size, board_ext_data_size;
	int ret;

	if (WARN_ON(ar->fw_board == NULL))
		return -ENOENT;

	/*
	 * Determine where in Target RAM to write Board Data.
	 * For AR6004, the host determines the Target RAM address for
	 * writing board data.
	 */
	if (ar->hw.board_addr != 0) {
		board_address = ar->hw.board_addr;
		ath6kl_bmi_write_hi32(ar, hi_board_data,
				      board_address);
	} else {
		ret = ath6kl_bmi_read_hi32(ar, hi_board_data, &board_address);
		if (ret) {
			ath6kl_err("Failed to get board file target address.\n");
			return ret;
		}
	}

	/* determine where in target ram to write extended board data */
	ret = ath6kl_bmi_read_hi32(ar, hi_board_ext_data, &board_ext_address);
	if (ret) {
		ath6kl_err("Failed to get extended board file target address.\n");
		return ret;
	}

	if (ar->target_type == TARGET_TYPE_AR6003 &&
	    board_ext_address == 0) {
		ath6kl_err("Failed to get board file target address.\n");
		return -EINVAL;
	}

	switch (ar->target_type) {
	case TARGET_TYPE_AR6003:
		board_data_size = AR6003_BOARD_DATA_SZ;
		board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ;
		if (ar->fw_board_len > (board_data_size + board_ext_data_size))
			board_ext_data_size = AR6003_BOARD_EXT_DATA_SZ_V2;
		break;
	case TARGET_TYPE_AR6004:
		board_data_size = AR6004_BOARD_DATA_SZ;
		board_ext_data_size = AR6004_BOARD_EXT_DATA_SZ;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	if (board_ext_address &&
	    ar->fw_board_len == (board_data_size + board_ext_data_size)) {
		/* write extended board data */
		ath6kl_dbg(ATH6KL_DBG_BOOT,
			   "writing extended board data to 0x%x (%d B)\n",
			   board_ext_address, board_ext_data_size);

		ret = ath6kl_bmi_write(ar, board_ext_address,
				       ar->fw_board + board_data_size,
				       board_ext_data_size);
		if (ret) {
			ath6kl_err("Failed to write extended board data: %d\n",
				   ret);
			return ret;
		}

		/* record that extended board data is initialized */
		param = (board_ext_data_size << 16) | 1;

		ath6kl_bmi_write_hi32(ar, hi_board_ext_data_config, param);
	}

	if (ar->fw_board_len < board_data_size) {
		ath6kl_err("Too small board file: %zu\n", ar->fw_board_len);
		ret = -EINVAL;
		return ret;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "writing board file to 0x%x (%d B)\n",
		   board_address, board_data_size);

	ret = ath6kl_bmi_write(ar, board_address, ar->fw_board,
			       board_data_size);
	if (ret) {
		ath6kl_err("Board file bmi write failed: %d\n", ret);
		return ret;
	}

	/* record the fact that Board Data IS initialized */
	if ((ar->version.target_ver == AR6004_HW_1_3_VERSION) ||
	    (ar->version.target_ver == AR6004_HW_3_0_VERSION))
		param = board_data_size;
	else
		param = 1;

	ath6kl_bmi_write_hi32(ar, hi_board_data_initialized, param);

	return ret;
}
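/*
 * The images fetched earlier are pushed to the target below over BMI:
 * ath6kl_upload_otp() fast-downloads the OTP image to hw.app_load_addr and
 * executes it, ath6kl_upload_firmware() downloads the main firmware image
 * (setting the app start address on non-AR6004 targets), and
 * ath6kl_upload_patch()/ath6kl_upload_testscript() write the optional patch
 * and testscript blobs and advertise them through the corresponding host
 * interest items.
 */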
static int ath6kl_upload_otp(struct ath6kl *ar)
{
	u32 address, param;
	bool from_hw = false;
	int ret;

	if (ar->fw_otp == NULL)
		return 0;

	address = ar->hw.app_load_addr;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "writing otp to 0x%x (%zd B)\n", address,
		   ar->fw_otp_len);

	ret = ath6kl_bmi_fast_download(ar, address, ar->fw_otp,
				       ar->fw_otp_len);
	if (ret) {
		ath6kl_err("Failed to upload OTP file: %d\n", ret);
		return ret;
	}

	/* read firmware start address */
	ret = ath6kl_bmi_read_hi32(ar, hi_app_start, &address);

	if (ret) {
		ath6kl_err("Failed to read hi_app_start: %d\n", ret);
		return ret;
	}

	if (ar->hw.app_start_override_addr == 0) {
		ar->hw.app_start_override_addr = address;
		from_hw = true;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
		   from_hw ? " (from hw)" : "",
		   ar->hw.app_start_override_addr);

	/* execute the OTP code */
	ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
		   ar->hw.app_start_override_addr);
	param = 0;
	ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);

	return ret;
}

static int ath6kl_upload_firmware(struct ath6kl *ar)
{
	u32 address;
	int ret;

	if (WARN_ON(ar->fw == NULL))
		return 0;

	address = ar->hw.app_load_addr;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "writing firmware to 0x%x (%zd B)\n",
		   address, ar->fw_len);

	ret = ath6kl_bmi_fast_download(ar, address, ar->fw, ar->fw_len);

	if (ret) {
		ath6kl_err("Failed to write firmware: %d\n", ret);
		return ret;
	}

	/*
	 * Set starting address for firmware
	 * Don't need to setup app_start override addr on AR6004
	 */
	if (ar->target_type != TARGET_TYPE_AR6004) {
		address = ar->hw.app_start_override_addr;
		ath6kl_bmi_set_app_start(ar, address);
	}
	return ret;
}

static int ath6kl_upload_patch(struct ath6kl *ar)
{
	u32 address;
	int ret;

	if (ar->fw_patch == NULL)
		return 0;

	address = ar->hw.dataset_patch_addr;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "writing patch to 0x%x (%zd B)\n",
		   address, ar->fw_patch_len);

	ret = ath6kl_bmi_write(ar, address, ar->fw_patch, ar->fw_patch_len);
	if (ret) {
		ath6kl_err("Failed to write patch file: %d\n", ret);
		return ret;
	}

	ath6kl_bmi_write_hi32(ar, hi_dset_list_head, address);

	return 0;
}

static int ath6kl_upload_testscript(struct ath6kl *ar)
{
	u32 address;
	int ret;

	if (ar->testmode != 2)
		return 0;

	if (ar->fw_testscript == NULL)
		return 0;

	address = ar->hw.testscript_addr;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "writing testscript to 0x%x (%zd B)\n",
		   address, ar->fw_testscript_len);

	ret = ath6kl_bmi_write(ar, address, ar->fw_testscript,
			       ar->fw_testscript_len);
	if (ret) {
		ath6kl_err("Failed to write testscript file: %d\n", ret);
		return ret;
	}

	ath6kl_bmi_write_hi32(ar, hi_ota_testscript, address);

	if ((ar->version.target_ver != AR6004_HW_1_3_VERSION) &&
	    (ar->version.target_ver != AR6004_HW_3_0_VERSION))
		ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 4096);

	ath6kl_bmi_write_hi32(ar, hi_test_apps_related, 1);

	return 0;
}

static int ath6kl_init_upload(struct ath6kl *ar)
{
	u32 param, options, sleep, address;
	int status = 0;

	if (ar->target_type != TARGET_TYPE_AR6003 &&
	    ar->target_type != TARGET_TYPE_AR6004)
		return -EINVAL;

	/* temporarily disable system sleep */
	address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
	status = ath6kl_bmi_reg_read(ar, address, &param);
	if (status)
		return status;

	options = param;

	param |= ATH6KL_OPTION_SLEEP_DISABLE;
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
	status = ath6kl_bmi_reg_read(ar, address, &param);
	if (status)
		return status;

	sleep = param;

	param |= SM(SYSTEM_SLEEP_DISABLE, 1);
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	ath6kl_dbg(ATH6KL_DBG_TRC, "old options: %d, old sleep: %d\n",
		   options, sleep);

	/* program analog PLL register */
	/* no need to control 40/44MHz clock on AR6004 */
	if (ar->target_type != TARGET_TYPE_AR6004) {
		status = ath6kl_bmi_reg_write(ar, ATH6KL_ANALOG_PLL_REGISTER,
					      0xF9104001);

		if (status)
			return status;

		/* Run at 80/88MHz by default */
		param = SM(CPU_CLOCK_STANDARD, 1);

		address = RTC_BASE_ADDRESS + CPU_CLOCK_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;
	}

	param = 0;
	address = RTC_BASE_ADDRESS + LPO_CAL_ADDRESS;
	param = SM(LPO_CAL_ENABLE, 1);
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	/* WAR to avoid SDIO CRC err */
	if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) {
		ath6kl_err("temporary war to avoid sdio crc error\n");

		param = 0x28;
		address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		param = 0x20;

		address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN11_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN12_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;

		address = GPIO_BASE_ADDRESS + GPIO_PIN13_ADDRESS;
		status = ath6kl_bmi_reg_write(ar, address, param);
		if (status)
			return status;
	}

	/* write EEPROM data to Target RAM */
	status = ath6kl_upload_board_file(ar);
	if (status)
		return status;

	/* transfer One time Programmable data */
	status = ath6kl_upload_otp(ar);
	if (status)
		return status;

	/* Download Target firmware */
	status = ath6kl_upload_firmware(ar);
	if (status)
		return status;

	status = ath6kl_upload_patch(ar);
	if (status)
		return status;

	/* Download the test script */
	status = ath6kl_upload_testscript(ar);
	if (status)
		return status;

	/* Restore system sleep */
	address = RTC_BASE_ADDRESS + SYSTEM_SLEEP_ADDRESS;
	status = ath6kl_bmi_reg_write(ar, address, sleep);
	if (status)
		return status;

	address = MBOX_BASE_ADDRESS + LOCAL_SCRATCH_ADDRESS;
	param = options | 0x20;
	status = ath6kl_bmi_reg_write(ar, address, param);
	if (status)
		return status;

	return status;
}

int ath6kl_init_hw_params(struct ath6kl *ar)
{
	const struct ath6kl_hw *hw;
	int i;

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = &hw_list[i];

		if (hw->id == ar->version.target_ver)
			break;
	}

	if (i == ARRAY_SIZE(hw_list)) {
		ath6kl_err("Unsupported hardware version: 0x%x\n",
			   ar->version.target_ver);
		return -EINVAL;
	}

	ar->hw = *hw;

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
		   ar->version.target_ver, ar->target_type,
		   ar->hw.dataset_patch_addr, ar->hw.app_load_addr);
	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
		   ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
		   ar->hw.reserved_ram_size);
	ath6kl_dbg(ATH6KL_DBG_BOOT, "refclk_hz %d uarttx_pin %d",
		   ar->hw.refclk_hz, ar->hw.uarttx_pin);

	return 0;
}

static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
{
	switch (type) {
	case ATH6KL_HIF_TYPE_SDIO:
		return "sdio";
	case ATH6KL_HIF_TYPE_USB:
		return "usb";
	}

	return NULL;
}

static const struct fw_capa_str_map {
	int id;
	const char *name;
} fw_capa_map[] = {
	{ ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
	{ ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
	{ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
	{ ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
	{ ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
	{ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
	{ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
	{ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
	{ ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
	{ ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
	{ ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
	{ ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
	{ ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
	{ ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
	{ ATH6KL_FW_CAPABILITY_64BIT_RATES, "64bit-rates" },
	{ ATH6KL_FW_CAPABILITY_AP_INACTIVITY_MINS, "ap-inactivity-mins" },
	{ ATH6KL_FW_CAPABILITY_MAP_LP_ENDPOINT, "map-lp-endpoint" },
	{ ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, "ratetable-mcs15" },
	{ ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, "no-ip-checksum" },
};

static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
		if (fw_capa_map[i].id == id)
			return fw_capa_map[i].name;
	}

	return "<unknown>";
}

static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
{
	u8 *data = (u8 *) ar->fw_capabilities;
	size_t trunc_len, len = 0;
	int i, index, bit;
	char *trunc = "...";

	for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
		index = i / 8;
		bit = i % 8;

		if (index >= sizeof(ar->fw_capabilities) * 4)
			break;

		if (buf_len - len < 4) {
			ath6kl_warn("firmware capability buffer too small!\n");

			/* add "..." to the end of string */
			trunc_len = strlen(trunc) + 1;
			memcpy(buf + buf_len - trunc_len, trunc, trunc_len);

			return;
		}

		if (data[index] & (1 << bit)) {
			len += scnprintf(buf + len, buf_len - len, "%s,",
					 ath6kl_init_get_fw_capa_name(i));
		}
	}

	/* overwrite the last comma */
	if (len > 0)
		len--;

	buf[len] = '\0';
}

static int ath6kl_init_hw_reset(struct ath6kl *ar)
{
	ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");

	return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
				   cpu_to_le32(RESET_CONTROL_COLD_RST));
}

static int __ath6kl_init_hw_start(struct ath6kl *ar)
{
	long timeleft;
	int ret, i;
	char buf[200];

	ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");

	ret = ath6kl_hif_power_on(ar);
	if (ret)
		return ret;

	ret = ath6kl_configure_target(ar);
	if (ret)
		goto err_power_off;

	ret = ath6kl_init_upload(ar);
	if (ret)
		goto err_power_off;

	/* Do we need to finish the BMI phase? */
	ret = ath6kl_bmi_done(ar);
	if (ret)
		goto err_power_off;

	/*
	 * The reason we have to wait for the target here is that the
	 * driver layer has to init BMI in order to set the host block
	 * size.
	 */
	ret = ath6kl_htc_wait_target(ar->htc_target);

	if (ret == -ETIMEDOUT) {
		/*
		 * Most likely USB target is in odd state after reboot and
		 * needs a reset. A cold reset makes the whole device
		 * disappear from USB bus and initialisation starts from
		 * beginning.
		 */
		ath6kl_warn("htc wait target timed out, resetting device\n");
		ath6kl_init_hw_reset(ar);
		goto err_power_off;
	} else if (ret) {
		ath6kl_err("htc wait target failed: %d\n", ret);
		goto err_power_off;
	}

	ret = ath6kl_init_service_ep(ar);
	if (ret) {
		ath6kl_err("Endpoint service initialization failed: %d\n", ret);
		goto err_cleanup_scatter;
	}

	/* setup credit distribution */
	ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);

	/* start HTC */
	ret = ath6kl_htc_start(ar->htc_target);
	if (ret) {
		/* FIXME: call this */
		ath6kl_cookie_cleanup(ar);
		goto err_cleanup_scatter;
	}

	/* Wait for Wmi event to be ready */
	timeleft = wait_event_interruptible_timeout(ar->event_wq,
						    test_bit(WMI_READY,
							     &ar->flag),
						    WMI_TIMEOUT);
	if (timeleft <= 0) {
		clear_bit(WMI_READY, &ar->flag);
		ath6kl_err("wmi is not ready or wait was interrupted: %ld\n",
			   timeleft);
		ret = -EIO;
		goto err_htc_stop;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");

	if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
		ath6kl_info("%s %s fw %s api %d%s\n",
			    ar->hw.name,
			    ath6kl_init_get_hif_name(ar->hif_type),
			    ar->wiphy->fw_version,
			    ar->fw_api,
			    test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
		ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
		ath6kl_info("firmware supports: %s\n", buf);
	}

	if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
		ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
			   ATH6KL_ABI_VERSION, ar->version.abi_ver);
		ret = -EIO;
		goto err_htc_stop;
	}

	ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);

	/* communicate the wmi protocol version to the target */
	/* FIXME: return error */
	if ((ath6kl_set_host_app_area(ar)) != 0)
		ath6kl_err("unable to set the host app area\n");

	for (i = 0; i < ar->vif_max; i++) {
		ret = ath6kl_target_config_wlan_params(ar, i);
		if (ret)
			goto err_htc_stop;
	}

	return 0;

err_htc_stop:
	ath6kl_htc_stop(ar->htc_target);
err_cleanup_scatter:
	ath6kl_hif_cleanup_scatter(ar);
err_power_off:
	ath6kl_hif_power_off(ar);

	return ret;
}

int ath6kl_init_hw_start(struct ath6kl *ar)
{
	int err;

	err = __ath6kl_init_hw_start(ar);
	if (err)
		return err;
	ar->state = ATH6KL_STATE_ON;
	return 0;
}

static int __ath6kl_init_hw_stop(struct ath6kl *ar)
{
	int ret;

	ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");

	ath6kl_htc_stop(ar->htc_target);

	ath6kl_hif_stop(ar);

	ath6kl_bmi_reset(ar);

	ret = ath6kl_hif_power_off(ar);
	if (ret)
		ath6kl_warn("failed to power off hif: %d\n", ret);

	return 0;
}

int ath6kl_init_hw_stop(struct ath6kl *ar)
{
	int err;

	err = __ath6kl_init_hw_stop(ar);
	if (err)
		return err;
	ar->state = ATH6KL_STATE_OFF;
	return 0;
}

void ath6kl_init_hw_restart(struct ath6kl *ar)
{
	clear_bit(WMI_READY, &ar->flag);

	ath6kl_cfg80211_stop_all(ar);

	if (__ath6kl_init_hw_stop(ar)) {
		ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n");
		return;
	}

	if (__ath6kl_init_hw_start(ar)) {
		ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n");
		return;
	}
}

void ath6kl_stop_txrx(struct ath6kl *ar)
{
	struct ath6kl_vif *vif, *tmp_vif;
	int i;

	set_bit(DESTROY_IN_PROGRESS, &ar->flag);

	if (down_interruptible(&ar->sem)) {
		ath6kl_err("down_interruptible failed\n");
		return;
	}

	for (i = 0; i < AP_MAX_NUM_STA; i++)
		aggr_reset_state(ar->sta_list[i].aggr_conn);

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
		list_del(&vif->list);
		spin_unlock_bh(&ar->list_lock);
		ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
		rtnl_lock();
		wiphy_lock(ar->wiphy);
		ath6kl_cfg80211_vif_cleanup(vif);
		wiphy_unlock(ar->wiphy);
		rtnl_unlock();
		spin_lock_bh(&ar->list_lock);
	}
	spin_unlock_bh(&ar->list_lock);

	clear_bit(WMI_READY, &ar->flag);

	if (ar->fw_recovery.enable)
		del_timer_sync(&ar->fw_recovery.hb_timer);

	/*
	 * After wmi_shutdown all WMI events will be dropped. We
	 * need to cleanup the buffers allocated in AP mode and
	 * give disconnect notification to stack, which usually
	 * happens in the disconnect_event. Simulate the disconnect
	 * event by calling the function directly. Sometimes
	 * disconnect_event will be received when the debug logs
	 * are collected.
	 */
	ath6kl_wmi_shutdown(ar->wmi);
	clear_bit(WMI_ENABLED, &ar->flag);
	if (ar->htc_target) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
		ath6kl_htc_stop(ar->htc_target);
	}

	/*
	 * Try to reset the device if we can. The driver may have been
	 * configured NOT to reset the target during a debug session.
	 */
	ath6kl_init_hw_reset(ar);

	up(&ar->sem);
}
EXPORT_SYMBOL(ath6kl_stop_txrx);
3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 
4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 
5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 
5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 
6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 
7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 
8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 
8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 
9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 
// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code
     from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicated, like
         adding a device to an array when it has incompatible metadata
     pr_info() for every interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.
*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

static const char *action_name[NR_SYNC_ACTIONS] = {
	[ACTION_RESYNC]		= "resync",
	[ACTION_RECOVER]	= "recover",
	[ACTION_CHECK]		= "check",
	[ACTION_REPAIR]		= "repair",
	[ACTION_RESHAPE]	= "reshape",
	[ACTION_FROZEN]		= "frozen",
	[ACTION_IDLE]		= "idle",
};
/* pers_list is a list of registered personalities protected by pers_lock. */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static const struct kobj_type md_ktype;

const struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
static struct module *md_cluster_mod;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;

/*
 * This workqueue is used for sync_work to register new sync_thread, and for
 * del_work to remove rdev, and for event_work that is only set by dm-raid.
 *
 * Noted that sync_work will grab reconfig_mutex, hence never flush this
 * workqueue whith reconfig_mutex grabbed.
 */
static struct workqueue_struct *md_misc_wq;
struct workqueue_struct *md_bitmap_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/* Default safemode delay: 200 msec */
#define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1)

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static void rdev_uninit_serial(struct md_rdev *rdev)
{
	if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
		return;

	kvfree(rdev->serial);
	rdev->serial = NULL;
}

static void rdevs_uninit_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		rdev_uninit_serial(rdev);
}

static int rdev_init_serial(struct md_rdev *rdev)
{
	/* serial_nums equals with BARRIER_BUCKETS_NR */
	int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
	struct serial_in_rdev *serial = NULL;

	if (test_bit(CollisionCheck, &rdev->flags))
		return 0;

	serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
			  GFP_KERNEL);
	if (!serial)
		return -ENOMEM;

	for (i = 0; i < serial_nums; i++) {
		struct serial_in_rdev *serial_tmp = &serial[i];

		spin_lock_init(&serial_tmp->serial_lock);
		serial_tmp->serial_rb = RB_ROOT_CACHED;
		init_waitqueue_head(&serial_tmp->serial_io_wait);
	}

	rdev->serial = serial;
	set_bit(CollisionCheck, &rdev->flags);

	return 0;
}

static int rdevs_init_serial(struct mddev *mddev)
{
	struct md_rdev *rdev;
	int ret = 0;

	rdev_for_each(rdev, mddev) {
		ret = rdev_init_serial(rdev);
		if (ret)
			break;
	}

	/* Free all resources if pool is not existed */
	if (ret && !mddev->serial_info_pool)
		rdevs_uninit_serial(mddev);

	return ret;
}
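/* Editor's illustration (not part of md.c): the bucket count chosen by
 * rdev_init_serial() above works out to PAGE_SIZE / sizeof(atomic_t).
 * A minimal standalone sketch of that arithmetic, assuming a 4 KiB page
 * (PAGE_SHIFT = 12) and a 4-byte atomic_t, which is typical but not
 * universal:
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12		/* assumed page size: 4 KiB */
typedef int sketch_atomic_t;		/* stand-in for the kernel's atomic_t */

static int sketch_ilog2(unsigned long v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	int serial_nums =
		1 << (SKETCH_PAGE_SHIFT - sketch_ilog2(sizeof(sketch_atomic_t)));

	/* 4096 / 4 = 1024 buckets, each with its own lock, rbtree and waitqueue */
	printf("serial buckets per rdev: %d\n", serial_nums);
	return 0;
}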
/*
 * rdev needs to enable serial stuffs if it meets the conditions:
 * 1. it is multi-queue device flaged with writemostly.
 * 2. the write-behind mode is enabled.
 */
static int rdev_need_serial(struct md_rdev *rdev)
{
	return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
		rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
		test_bit(WriteMostly, &rdev->flags));
}

/*
 * Init resource for rdev(s), then create serial_info_pool if:
 * 1. rdev is the first device which return true from rdev_enable_serial.
 * 2. rdev is NULL, means we want to enable serialization for all rdevs.
 */
void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
{
	int ret = 0;

	if (rdev && !rdev_need_serial(rdev) &&
	    !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (!rdev)
		ret = rdevs_init_serial(mddev);
	else
		ret = rdev_init_serial(rdev);
	if (ret)
		return;

	if (mddev->serial_info_pool == NULL) {
		/*
		 * already in memalloc noio context by
		 * mddev_suspend()
		 */
		mddev->serial_info_pool =
			mempool_create_kmalloc_pool(NR_SERIAL_INFOS,
						sizeof(struct serial_info));
		if (!mddev->serial_info_pool) {
			rdevs_uninit_serial(mddev);
			pr_err("can't alloc memory pool for serialization\n");
		}
	}
}

/*
 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
 * 1. rdev is the last device flaged with CollisionCheck.
 * 2. when bitmap is destroyed while policy is not enabled.
 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
 */
void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev)
{
	if (rdev && !test_bit(CollisionCheck, &rdev->flags))
		return;

	if (mddev->serial_info_pool) {
		struct md_rdev *temp;
		int num = 0; /* used to track if other rdevs need the pool */

		rdev_for_each(temp, mddev) {
			if (!rdev) {
				if (!mddev->serialize_policy ||
				    !rdev_need_serial(temp))
					rdev_uninit_serial(temp);
				else
					num++;
			} else if (temp != rdev &&
				   test_bit(CollisionCheck, &temp->flags))
				num++;
		}

		if (rdev)
			rdev_uninit_serial(rdev);

		if (num)
			pr_info("The mempool could be used by other devices\n");
		else {
			mempool_destroy(mddev->serial_info_pool);
			mddev->serial_info_pool = NULL;
		}
	}
}

static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
};

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it. This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(void)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
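/* Editor's illustration (not part of md.c): the event count above is what
 * lets userspace watch for array state changes by polling /proc/mdstat and
 * re-reading it when the count bumps.  A minimal userspace sketch of that
 * pattern follows; it assumes an exceptional condition (POLLPRI/POLLERR) is
 * signalled on an event, which is how monitors such as mdadm are generally
 * written, but real monitors are considerably more careful.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;
	int fd = open("/proc/mdstat", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/mdstat");
		return 1;
	}

	for (;;) {
		/* A read must precede each poll so the event count is latched. */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}

		pfd.fd = fd;
		pfd.events = POLLPRI;
		if (poll(&pfd, 1, -1) < 0) {
			perror("poll");
			break;
		}
		/* An exceptional event here means the count changed; loop and re-read. */
	}

	close(fd);
	return 0;
}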
/*
 * Enables to iterate over all existing md arrays
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

static bool is_md_suspended(struct mddev *mddev)
{
	return percpu_ref_is_dying(&mddev->active_io);
}

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (is_md_suspended(mddev))
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (READ_ONCE(mddev->suspend_lo) >= READ_ONCE(mddev->suspend_hi))
		return false;
	if (bio->bi_iter.bi_sector >= READ_ONCE(mddev->suspend_hi))
		return false;
	if (bio_end_sector(bio) < READ_ONCE(mddev->suspend_lo))
		return false;
	return true;
}

bool md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		/* Bail out if REQ_NOWAIT is set for the bio */
		if (bio->bi_opf & REQ_NOWAIT) {
			bio_wouldblock_error(bio);
			return true;
		}
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			schedule();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	if (!percpu_ref_tryget_live(&mddev->active_io))
		goto check_suspended;

	if (!mddev->pers->make_request(mddev, bio)) {
		percpu_ref_put(&mddev->active_io);
		if (!mddev->gendisk && mddev->pers->prepare_suspend)
			return false;
		goto check_suspended;
	}

	percpu_ref_put(&mddev->active_io);
	return true;
}
EXPORT_SYMBOL(md_handle_request);

static void md_submit_bio(struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return;
	}

	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
		bio_io_error(bio);
		return;
	}

	bio = bio_split_to_limits(bio);
	if (!bio)
		return;

	if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return;
	}

	/* bio could be mergeable after passing to underlayer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);
}
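/* Editor's illustration (not part of md.c): is_suspended() above only
 * throttles writes, and only those overlapping the half-open sector range
 * [suspend_lo, suspend_hi).  The standalone model below mirrors the same
 * comparisons with hypothetical sector values so the overlap rule can be
 * checked in isolation.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long model_sector_t;

static bool write_is_suspended(model_sector_t suspend_lo, model_sector_t suspend_hi,
			       model_sector_t start, model_sector_t len)
{
	model_sector_t end = start + len;	/* exclusive, like bio_end_sector() */

	if (suspend_lo >= suspend_hi)		/* empty window: nothing is held */
		return false;
	if (start >= suspend_hi)		/* begins at or after the window */
		return false;
	if (end < suspend_lo)			/* ends before the window */
		return false;
	return true;
}

int main(void)
{
	/* Hypothetical window covering sectors 1000..1999 */
	printf("%d\n", write_is_suspended(1000, 2000, 900, 50));	/* 0: before window */
	printf("%d\n", write_is_suspended(1000, 2000, 1990, 64));	/* 1: overlaps */
	printf("%d\n", write_is_suspended(1000, 2000, 2000, 8));	/* 0: after window */
	return 0;
}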
/*
 * Make sure no new requests are submitted to the device, and any requests that
 * have been submitted are completely handled.
 */
int mddev_suspend(struct mddev *mddev, bool interruptible)
{
	int err = 0;

	/*
	 * hold reconfig_mutex to wait for normal io will deadlock, because
	 * other context can't update super_block, and normal io can rely on
	 * updating super_block.
	 */
	lockdep_assert_not_held(&mddev->reconfig_mutex);

	if (interruptible)
		err = mutex_lock_interruptible(&mddev->suspend_mutex);
	else
		mutex_lock(&mddev->suspend_mutex);
	if (err)
		return err;

	if (mddev->suspended) {
		WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
		mutex_unlock(&mddev->suspend_mutex);
		return 0;
	}

	percpu_ref_kill(&mddev->active_io);
	if (interruptible)
		err = wait_event_interruptible(mddev->sb_wait,
				percpu_ref_is_zero(&mddev->active_io));
	else
		wait_event(mddev->sb_wait,
				percpu_ref_is_zero(&mddev->active_io));
	if (err) {
		percpu_ref_resurrect(&mddev->active_io);
		mutex_unlock(&mddev->suspend_mutex);
		return err;
	}

	/*
	 * For raid456, io might be waiting for reshape to make progress,
	 * allow new reshape to start while waiting for io to be done to
	 * prevent deadlock.
	 */
	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);

	/* restrict memory reclaim I/O during raid array is suspend */
	mddev->noio_flag = memalloc_noio_save();

	mutex_unlock(&mddev->suspend_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_suspend);

static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
{
	lockdep_assert_not_held(&mddev->reconfig_mutex);

	mutex_lock(&mddev->suspend_mutex);
	WRITE_ONCE(mddev->suspended, mddev->suspended - 1);
	if (mddev->suspended) {
		mutex_unlock(&mddev->suspend_mutex);
		return;
	}

	/* entred the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);

	percpu_ref_resurrect(&mddev->active_io);
	wake_up(&mddev->sb_wait);

	if (recovery_needed)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	mutex_unlock(&mddev->suspend_mutex);
}

void mddev_resume(struct mddev *mddev)
{
	return __mddev_resume(mddev, true);
}
EXPORT_SYMBOL_GPL(mddev_resume);
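/* Editor's illustration (not part of md.c): mddev_suspend()/mddev_resume()
 * nest, with the depth kept in mddev->suspended; only the outermost suspend
 * quiesces I/O and only the matching final resume restores it.  A
 * stripped-down model of that counting makes the pairing explicit.
 */
#include <stdio.h>

struct toy_mddev {
	int suspended;		/* models mddev->suspended */
	int io_quiesced;	/* models active_io being killed */
};

static void toy_suspend(struct toy_mddev *m)
{
	if (m->suspended++ == 0) {
		m->io_quiesced = 1;
		printf("first suspend: I/O quiesced\n");
	} else {
		printf("nested suspend (depth %d)\n", m->suspended);
	}
}

static void toy_resume(struct toy_mddev *m)
{
	if (--m->suspended == 0) {
		m->io_quiesced = 0;
		printf("last resume: I/O flowing again\n");
	} else {
		printf("still suspended (depth %d)\n", m->suspended);
	}
}

int main(void)
{
	struct toy_mddev m = { 0, 0 };

	toy_suspend(&m);	/* quiesces */
	toy_suspend(&m);	/* nested, no extra work */
	toy_resume(&m);		/* still suspended */
	toy_resume(&m);		/* resumes */
	return 0;
}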
/* sync bdev before setting device to readonly or stopping raid*/
static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
{
	mutex_lock(&mddev->open_mutex);
	if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	mutex_unlock(&mddev->open_mutex);

	sync_blockdev(mddev->gendisk->part0);
	return 0;
}

/*
 * The only difference from bio_chain_endio() is that the current
 * bi_status of bio does not affect the bi_status of parent.
 */
static void md_end_flush(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	/*
	 * If any flush io error before the power failure,
	 * disk data may be lost.
	 */
	if (bio->bi_status)
		pr_err("md: %pg flush io error %d\n", bio->bi_bdev,
		       blk_status_to_errno(bio->bi_status));

	bio_put(bio);
	bio_endio(parent);
}

bool md_flush_request(struct mddev *mddev, struct bio *bio)
{
	struct md_rdev *rdev;
	struct bio *new;

	/*
	 * md_flush_reqeust() should be called under md_handle_request() and
	 * 'active_io' is already grabbed. Hence it's safe to get rdev directly
	 * without rcu protection.
	 */
	WARN_ON(percpu_ref_is_zero(&mddev->active_io));

	rdev_for_each(rdev, mddev) {
		if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
			continue;

		new = bio_alloc_bioset(rdev->bdev, 0,
				       REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO,
				       &mddev->bio_set);
		new->bi_private = bio;
		new->bi_end_io = md_end_flush;
		bio_inc_remaining(bio);
		submit_bio(new);
	}

	if (bio_sectors(bio) == 0) {
		bio_endio(bio);
		return true;
	}

	bio->bi_opf &= ~REQ_PREFLUSH;
	return false;
}
EXPORT_SYMBOL(md_flush_request);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
	lockdep_assert_held(&all_mddevs_lock);

	if (test_bit(MD_DELETED, &mddev->flags))
		return NULL;
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void __mddev_put(struct mddev *mddev)
{
	if (mddev->raid_disks || !list_empty(&mddev->disks) ||
	    mddev->ctime || mddev->hold_active)
		return;

	/* Array is not configured at all, and not held active, so destroy it */
	set_bit(MD_DELETED, &mddev->flags);

	/*
	 * Call queue_work inside the spinlock so that flush_workqueue() after
	 * mddev_find will succeed in waiting for the work to be done.
	 */
	queue_work(md_misc_wq, &mddev->del_work);
}

void mddev_put(struct mddev *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;

	__mddev_put(mddev);
	spin_unlock(&all_mddevs_lock);
}

static void md_safemode_timeout(struct timer_list *t);
static void md_start_sync(struct work_struct *ws);

static void active_io_release(struct percpu_ref *ref)
{
	struct mddev *mddev = container_of(ref, struct mddev, active_io);

	wake_up(&mddev->sb_wait);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init(struct mddev *mddev)
{
	if (percpu_ref_init(&mddev->active_io, active_io_release,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
		return -ENOMEM;

	if (percpu_ref_init(&mddev->writes_pending, no_op,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		percpu_ref_exit(&mddev->active_io);
		return -ENOMEM;
	}

	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);

	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->suspend_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	INIT_LIST_HEAD(&mddev->deleting);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->sync_seq, 0);
	spin_lock_init(&mddev->lock);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->reshape_backwards = 0;
	mddev->last_sync_action = ACTION_IDLE;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
	mddev_set_bitmap_ops(mddev);

	INIT_WORK(&mddev->sync_work, md_start_sync);
	INIT_WORK(&mddev->del_work, mddev_delayed_delete);

	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init);

void mddev_destroy(struct mddev *mddev)
{
	percpu_ref_exit(&mddev->active_io);
	percpu_ref_exit(&mddev->writes_pending);
}
EXPORT_SYMBOL_GPL(mddev_destroy);

static struct mddev *mddev_find_locked(dev_t unit)
{
	struct mddev *mddev;

	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit)
			return mddev;

	return NULL;
}
/* find an unused unit number */
static dev_t mddev_alloc_unit(void)
{
	static int next_minor = 512;
	int start = next_minor;
	bool is_free = 0;
	dev_t dev = 0;

	while (!is_free) {
		dev = MKDEV(MD_MAJOR, next_minor);
		next_minor++;
		if (next_minor > MINORMASK)
			next_minor = 0;
		if (next_minor == start)
			return 0;		/* Oh dear, all in use. */
		is_free = !mddev_find_locked(dev);
	}

	return dev;
}

static struct mddev *mddev_alloc(dev_t unit)
{
	struct mddev *new;
	int error;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1 << MdpMinorShift) - 1);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	error = mddev_init(new);
	if (error)
		goto out_free_new;

	spin_lock(&all_mddevs_lock);
	if (unit) {
		error = -EEXIST;
		if (mddev_find_locked(unit))
			goto out_destroy_new;
		new->unit = unit;
		if (MAJOR(unit) == MD_MAJOR)
			new->md_minor = MINOR(unit);
		else
			new->md_minor = MINOR(unit) >> MdpMinorShift;
		new->hold_active = UNTIL_IOCTL;
	} else {
		error = -ENODEV;
		new->unit = mddev_alloc_unit();
		if (!new->unit)
			goto out_destroy_new;
		new->md_minor = MINOR(new->unit);
		new->hold_active = UNTIL_STOP;
	}

	list_add(&new->all_mddevs, &all_mddevs);
	spin_unlock(&all_mddevs_lock);
	return new;

out_destroy_new:
	spin_unlock(&all_mddevs_lock);
	mddev_destroy(new);
out_free_new:
	kfree(new);
	return ERR_PTR(error);
}

static void mddev_free(struct mddev *mddev)
{
	spin_lock(&all_mddevs_lock);
	list_del(&mddev->all_mddevs);
	spin_unlock(&all_mddevs_lock);

	mddev_destroy(mddev);
	kfree(mddev);
}
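/* Editor's illustration (not part of md.c): mddev_alloc_unit() above scans
 * minors round-robin from a remembered starting point and gives up once the
 * scan wraps back to where it started.  A tiny standalone model of that
 * wrap-around search, with 'in_use' standing in for mddev_find_locked() and
 * a deliberately small minor space:
 */
#include <stdbool.h>
#include <stdio.h>

#define MINOR_LIMIT 8	/* tiny stand-in for MINORMASK + 1 */

static bool in_use(int minor, const bool table[MINOR_LIMIT])
{
	return table[minor];
}

/* Returns a free minor, or -1 once the scan wraps back to its start. */
static int alloc_minor(int *next_minor, const bool table[MINOR_LIMIT])
{
	int start = *next_minor;
	int candidate = -1;
	bool is_free = false;

	while (!is_free) {
		candidate = *next_minor;
		*next_minor = (*next_minor + 1) % MINOR_LIMIT;
		if (*next_minor == start)
			return -1;	/* scanned everything: all minors in use */
		is_free = !in_use(candidate, table);
	}
	return candidate;
}

int main(void)
{
	bool used[MINOR_LIMIT] = { [3] = true, [4] = true };
	int next = 3;	/* hypothetical starting point, like next_minor = 512 */

	printf("%d\n", alloc_minor(&next, used));	/* 5: skips the used 3 and 4 */
	printf("%d\n", alloc_minor(&next, used));	/* 6 */
	return 0;
}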
*/ const struct attribute_group *to_remove = mddev->to_remove; mddev->to_remove = NULL; mddev->sysfs_active = 1; mutex_unlock(&mddev->reconfig_mutex); if (mddev->kobj.sd) { if (to_remove != &md_redundancy_group) sysfs_remove_group(&mddev->kobj, to_remove); if (mddev->pers == NULL || mddev->pers->sync_request == NULL) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); if (mddev->sysfs_action) sysfs_put(mddev->sysfs_action); if (mddev->sysfs_completed) sysfs_put(mddev->sysfs_completed); if (mddev->sysfs_degraded) sysfs_put(mddev->sysfs_degraded); mddev->sysfs_action = NULL; mddev->sysfs_completed = NULL; mddev->sysfs_degraded = NULL; } } mddev->sysfs_active = 0; } else mutex_unlock(&mddev->reconfig_mutex); md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); list_for_each_entry_safe(rdev, tmp, &delete, same_set) { list_del_init(&rdev->same_set); kobject_del(&rdev->kobj); export_rdev(rdev, mddev); } } EXPORT_SYMBOL_GPL(mddev_unlock); struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr) { struct md_rdev *rdev; rdev_for_each_rcu(rdev, mddev) if (rdev->desc_nr == nr) return rdev; return NULL; } EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu); static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev) { struct md_rdev *rdev; rdev_for_each(rdev, mddev) if (rdev->bdev->bd_dev == dev) return rdev; return NULL; } struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev) { struct md_rdev *rdev; rdev_for_each_rcu(rdev, mddev) if (rdev->bdev->bd_dev == dev) return rdev; return NULL; } EXPORT_SYMBOL_GPL(md_find_rdev_rcu); static struct md_personality *find_pers(int level, char *clevel) { struct md_personality *pers; list_for_each_entry(pers, &pers_list, list) { if (level != LEVEL_NONE && pers->level == level) return pers; if (strcmp(pers->name, clevel)==0) return pers; } return NULL; } /* return the offset of the super block in 512byte sectors */ static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) { return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev)); } static int alloc_disk_sb(struct md_rdev *rdev) { rdev->sb_page = alloc_page(GFP_KERNEL); if (!rdev->sb_page) return -ENOMEM; return 0; } void md_rdev_clear(struct md_rdev *rdev) { if (rdev->sb_page) { put_page(rdev->sb_page); rdev->sb_loaded = 0; rdev->sb_page = NULL; rdev->sb_start = 0; rdev->sectors = 0; } if (rdev->bb_page) { put_page(rdev->bb_page); rdev->bb_page = NULL; } badblocks_exit(&rdev->badblocks); } EXPORT_SYMBOL_GPL(md_rdev_clear); static void super_written(struct bio *bio) { struct md_rdev *rdev = bio->bi_private; struct mddev *mddev = rdev->mddev; if (bio->bi_status) { pr_err("md: %s gets error=%d\n", __func__, blk_status_to_errno(bio->bi_status)); md_error(mddev, rdev); if (!test_bit(Faulty, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) { set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); set_bit(LastDev, &rdev->flags); } } else clear_bit(LastDev, &rdev->flags); bio_put(bio); rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); } void md_super_write(struct mddev *mddev, struct md_rdev *rdev, sector_t sector, int size, struct page *page) { /* write first size bytes of page to sector of rdev * Increment mddev->pending_writes before returning * and decrement it on completion, waking up sb_wait * if zero is reached. * If an error occurred, call md_error */ struct bio *bio; if (!page) return; if (test_bit(Faulty, &rdev->flags)) return; bio = bio_alloc_bioset(rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META | REQ_PREFLUSH | REQ_FUA, GFP_NOIO, &mddev->sync_set); atomic_inc(&rdev->nr_pending); bio->bi_iter.bi_sector = sector; __bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && test_bit(FailFast, &rdev->flags) && !test_bit(LastDev, &rdev->flags)) bio->bi_opf |= MD_FAILFAST; atomic_inc(&mddev->pending_writes); submit_bio(bio); } int md_super_wait(struct mddev *mddev) { /* wait for all superblock writes that were scheduled to complete */ wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) return -EAGAIN; return 0; } int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, blk_opf_t opf, bool metadata_op) { struct bio bio; struct bio_vec bvec; if (metadata_op && rdev->meta_bdev) bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); else bio_init(&bio, rdev->bdev, &bvec, 1, opf); if (metadata_op) bio.bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && (rdev->mddev->reshape_backwards == (sector >= rdev->mddev->reshape_position))) bio.bi_iter.bi_sector = sector + rdev->new_data_offset; else bio.bi_iter.bi_sector = sector + rdev->data_offset; __bio_add_page(&bio, page, size, 0); submit_bio_wait(&bio); return !bio.bi_status; } EXPORT_SYMBOL_GPL(sync_page_io); static int read_disk_sb(struct md_rdev *rdev, int size) { if (rdev->sb_loaded) return 0; if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) goto fail; rdev->sb_loaded = 1; return 0; fail: pr_err("md: disabled device %pg, could not read superblock.\n", rdev->bdev); return -EINVAL; } static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) { return sb1->set_uuid0 == sb2->set_uuid0 && sb1->set_uuid1 == sb2->set_uuid1 && sb1->set_uuid2 == sb2->set_uuid2 && sb1->set_uuid3 == sb2->set_uuid3; } static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) { int ret; mdp_super_t *tmp1, *tmp2; tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); if (!tmp1 || !tmp2) { ret = 0; goto abort; } *tmp1 = *sb1; *tmp2 = *sb2; /* * nr_disks is not constant */ tmp1->nr_disks = 0; tmp2->nr_disks = 0; ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); abort: kfree(tmp1); kfree(tmp2); return ret; } static u32 md_csum_fold(u32 csum) { csum = (csum & 0xffff) + (csum >> 16); return (csum & 0xffff) + (csum >> 16); } static unsigned int calc_sb_csum(mdp_super_t *sb) { u64 newcsum = 0; u32 *sb32 = (u32*)sb; int i; unsigned int disk_csum, csum; disk_csum = sb->sb_csum; sb->sb_csum = 0; for (i = 0; i < MD_SB_BYTES/4 ; i++) newcsum += sb32[i]; csum = (newcsum & 0xffffffff) + (newcsum>>32); #ifdef CONFIG_ALPHA /* This used to use csum_partial, which was wrong for several * reasons including that different results are returned on * different architectures. It isn't critical that we get exactly * the same return value as before (we always csum_fold before * testing, and that removes any differences). However as we * know that csum_partial always returned a 16bit value on * alphas, do a fold to maximise conformity to previous behaviour. */ sb->sb_csum = md_csum_fold(disk_csum); #else sb->sb_csum = disk_csum; #endif return csum; } /* * Handle superblock details. * We want to be able to handle multiple superblock formats * so we have a common interface to them all, and an array of * different handlers. 
* We rely on user-space to write the initial superblock, and support * reading and updating of superblocks. * Interface methods are: * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) * loads and validates a superblock on dev. * if refdev != NULL, compare superblocks on both devices * Return: * 0 - dev has a superblock that is compatible with refdev * 1 - dev has a superblock that is compatible and newer than refdev * so dev should be used as the refdev in future * -EINVAL superblock incompatible or invalid * -othererror e.g. -EIO * * int validate_super(struct mddev *mddev, struct md_rdev *dev) * Verify that dev is acceptable into mddev. * The first time, mddev->raid_disks will be 0, and data from * dev should be merged in. Subsequent calls check that dev * is new enough. Return 0 or -EINVAL * * void sync_super(struct mddev *mddev, struct md_rdev *dev) * Update the superblock for rdev with data in mddev * This does not write to disc. * */ struct super_type { char *name; struct module *owner; int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version); int (*validate_super)(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev); void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); unsigned long long (*rdev_size_change)(struct md_rdev *rdev, sector_t num_sectors); int (*allow_new_offset)(struct md_rdev *rdev, unsigned long long new_offset); }; /* * Check that the given mddev has no bitmap. * * This function is called from the run method of all personalities that do not * support bitmaps. It prints an error message and returns non-zero if mddev * has a bitmap. Otherwise, it returns 0. * */ int md_check_no_bitmap(struct mddev *mddev) { if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) return 0; pr_warn("%s: bitmaps are not supported for %s\n", mdname(mddev), mddev->pers->name); return 1; } EXPORT_SYMBOL(md_check_no_bitmap); /* * load_super for 0.90.0 */ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) { mdp_super_t *sb; int ret; bool spare_disk = true; /* * Calculate the position of the superblock (512byte sectors), * it's at the end of the disk. * * It also happens to be a multiple of 4Kb. 
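/* Illustrative, userspace-only sketch of the end-of-device placement the
 * comment above describes for 0.90 superblocks. It assumes the usual md_p.h
 * definitions (MD_RESERVED_SECTORS == 128, i.e. a 64 KiB reserved area);
 * the macro below is reproduced under that assumption, not taken from this
 * listing. */
#include <stdio.h>
#include <stdint.h>

#define MD_RESERVED_SECTORS	128ULL
#define MD_NEW_SIZE_SECTORS(x)	(((x) & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)

int main(void)
{
	uint64_t dev_sectors = 1953525168ULL;	/* example: ~1 TB disk, in 512-byte sectors */
	uint64_t sb_start = MD_NEW_SIZE_SECTORS(dev_sectors);

	/* the superblock occupies the last 64 KiB-aligned 64 KiB window */
	printf("0.90 sb_start = %llu sectors (%.1f GiB into the device)\n",
	       (unsigned long long)sb_start, sb_start / 2097152.0);
	return 0;
}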
*/ rdev->sb_start = calc_dev_sboffset(rdev); ret = read_disk_sb(rdev, MD_SB_BYTES); if (ret) return ret; ret = -EINVAL; sb = page_address(rdev->sb_page); if (sb->md_magic != MD_SB_MAGIC) { pr_warn("md: invalid raid superblock magic on %pg\n", rdev->bdev); goto abort; } if (sb->major_version != 0 || sb->minor_version < 90 || sb->minor_version > 91) { pr_warn("Bad version number %d.%d on %pg\n", sb->major_version, sb->minor_version, rdev->bdev); goto abort; } if (sb->raid_disks <= 0) goto abort; if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev); goto abort; } rdev->preferred_minor = sb->md_minor; rdev->data_offset = 0; rdev->new_data_offset = 0; rdev->sb_size = MD_SB_BYTES; rdev->badblocks.shift = -1; rdev->desc_nr = sb->this_disk.number; /* not spare disk */ if (rdev->desc_nr >= 0 && rdev->desc_nr < MD_SB_DISKS && sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) spare_disk = false; if (!refdev) { if (!spare_disk) ret = 1; else ret = 0; } else { __u64 ev1, ev2; mdp_super_t *refsb = page_address(refdev->sb_page); if (!md_uuid_equal(refsb, sb)) { pr_warn("md: %pg has different UUID to %pg\n", rdev->bdev, refdev->bdev); goto abort; } if (!md_sb_equal(refsb, sb)) { pr_warn("md: %pg has same UUID but different superblock to %pg\n", rdev->bdev, refdev->bdev); goto abort; } ev1 = md_event(sb); ev2 = md_event(refsb); if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; } rdev->sectors = rdev->sb_start; /* Limit to 4TB as metadata cannot record more than that. * (not needed for Linear and RAID0 as metadata doesn't * record this size) */ if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (sector_t)(2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) /* "this cannot possibly happen" ... 
*/ ret = -EINVAL; abort: return ret; } static u64 md_bitmap_events_cleared(struct mddev *mddev) { struct md_bitmap_stats stats; int err; err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); if (err) return 0; return stats.events_cleared; } /* * validate_super for 0.90.0 * note: we are not using "freshest" for 0.9 superblock */ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) { mdp_disk_t *desc; mdp_super_t *sb = page_address(rdev->sb_page); __u64 ev1 = md_event(sb); rdev->raid_disk = -1; clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); if (mddev->raid_disks == 0) { mddev->major_version = 0; mddev->minor_version = sb->minor_version; mddev->patch_version = sb->patch_version; mddev->external = 0; mddev->chunk_sectors = sb->chunk_size >> 9; mddev->ctime = sb->ctime; mddev->utime = sb->utime; mddev->level = sb->level; mddev->clevel[0] = 0; mddev->layout = sb->layout; mddev->raid_disks = sb->raid_disks; mddev->dev_sectors = ((sector_t)sb->size) * 2; mddev->events = ev1; mddev->bitmap_info.offset = 0; mddev->bitmap_info.space = 0; /* bitmap can use 60 K after the 4K superblocks */ mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); mddev->reshape_backwards = 0; if (mddev->minor_version >= 91) { mddev->reshape_position = sb->reshape_position; mddev->delta_disks = sb->delta_disks; mddev->new_level = sb->new_level; mddev->new_layout = sb->new_layout; mddev->new_chunk_sectors = sb->new_chunk >> 9; if (mddev->delta_disks < 0) mddev->reshape_backwards = 1; } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; } if (mddev->level == 0) mddev->layout = -1; if (sb->state & (1<<MD_SB_CLEAN)) mddev->recovery_cp = MaxSector; else { if (sb->events_hi == sb->cp_events_hi && sb->events_lo == sb->cp_events_lo) { mddev->recovery_cp = sb->recovery_cp; } else mddev->recovery_cp = 0; } memcpy(mddev->uuid+0, &sb->set_uuid0, 4); memcpy(mddev->uuid+4, &sb->set_uuid1, 4); memcpy(mddev->uuid+8, &sb->set_uuid2, 4); memcpy(mddev->uuid+12,&sb->set_uuid3, 4); mddev->max_disks = MD_SB_DISKS; if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && mddev->bitmap_info.file == NULL) { mddev->bitmap_info.offset = mddev->bitmap_info.default_offset; mddev->bitmap_info.space = mddev->bitmap_info.default_space; } } else if (mddev->pers == NULL) { /* Insist on good event counter while assembling, except * for spares (which don't need an event count) */ ++ev1; if (sb->disks[rdev->desc_nr].state & ( (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) if (ev1 < mddev->events) return -EINVAL; } else if (mddev->bitmap) { /* if adding to array with a bitmap, then we can accept an * older device ... but not too old. */ if (ev1 < md_bitmap_events_cleared(mddev)) return 0; if (ev1 < mddev->events) set_bit(Bitmap_sync, &rdev->flags); } else { if (ev1 < mddev->events) /* just a hot-add of a new device, leave raid_disk at -1 */ return 0; } desc = sb->disks + rdev->desc_nr; if (desc->state & (1<<MD_DISK_FAULTY)) set_bit(Faulty, &rdev->flags); else if (desc->state & (1<<MD_DISK_SYNC)) { set_bit(In_sync, &rdev->flags); rdev->raid_disk = desc->raid_disk; rdev->saved_raid_disk = desc->raid_disk; } else if (desc->state & (1<<MD_DISK_ACTIVE)) { /* active but not in sync implies recovery up to * reshape position. 
We don't know exactly where * that is, so set to zero for now */ if (mddev->minor_version >= 91) { rdev->recovery_offset = 0; rdev->raid_disk = desc->raid_disk; } } if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) set_bit(WriteMostly, &rdev->flags); if (desc->state & (1<<MD_DISK_FAILFAST)) set_bit(FailFast, &rdev->flags); return 0; } /* * sync_super for 0.90.0 */ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) { mdp_super_t *sb; struct md_rdev *rdev2; int next_spare = mddev->raid_disks; /* make rdev->sb match mddev data.. * * 1/ zero out disks * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); * 3/ any empty disks < next_spare become removed * * disks[0] gets initialised to REMOVED because * we cannot be sure from other fields if it has * been initialised or not. */ int i; int active=0, working=0,failed=0,spare=0,nr_disks=0; rdev->sb_size = MD_SB_BYTES; sb = page_address(rdev->sb_page); memset(sb, 0, sizeof(*sb)); sb->md_magic = MD_SB_MAGIC; sb->major_version = mddev->major_version; sb->patch_version = mddev->patch_version; sb->gvalid_words = 0; /* ignored */ memcpy(&sb->set_uuid0, mddev->uuid+0, 4); memcpy(&sb->set_uuid1, mddev->uuid+4, 4); memcpy(&sb->set_uuid2, mddev->uuid+8, 4); memcpy(&sb->set_uuid3, mddev->uuid+12,4); sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); sb->level = mddev->level; sb->size = mddev->dev_sectors / 2; sb->raid_disks = mddev->raid_disks; sb->md_minor = mddev->md_minor; sb->not_persistent = 0; sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); sb->state = 0; sb->events_hi = (mddev->events>>32); sb->events_lo = (u32)mddev->events; if (mddev->reshape_position == MaxSector) sb->minor_version = 90; else { sb->minor_version = 91; sb->reshape_position = mddev->reshape_position; sb->new_level = mddev->new_level; sb->delta_disks = mddev->delta_disks; sb->new_layout = mddev->new_layout; sb->new_chunk = mddev->new_chunk_sectors << 9; } mddev->minor_version = sb->minor_version; if (mddev->in_sync) { sb->recovery_cp = mddev->recovery_cp; sb->cp_events_hi = (mddev->events>>32); sb->cp_events_lo = (u32)mddev->events; if (mddev->recovery_cp == MaxSector) sb->state = (1<< MD_SB_CLEAN); } else sb->recovery_cp = 0; sb->layout = mddev->layout; sb->chunk_size = mddev->chunk_sectors << 9; if (mddev->bitmap && mddev->bitmap_info.file == NULL) sb->state |= (1<<MD_SB_BITMAP_PRESENT); sb->disks[0].state = (1<<MD_DISK_REMOVED); rdev_for_each(rdev2, mddev) { mdp_disk_t *d; int desc_nr; int is_active = test_bit(In_sync, &rdev2->flags); if (rdev2->raid_disk >= 0 && sb->minor_version >= 91) /* we have nowhere to store the recovery_offset, * but if it is not below the reshape_position, * we can piggy-back on that. 
*/ is_active = 1; if (rdev2->raid_disk < 0 || test_bit(Faulty, &rdev2->flags)) is_active = 0; if (is_active) desc_nr = rdev2->raid_disk; else desc_nr = next_spare++; rdev2->desc_nr = desc_nr; d = &sb->disks[rdev2->desc_nr]; nr_disks++; d->number = rdev2->desc_nr; d->major = MAJOR(rdev2->bdev->bd_dev); d->minor = MINOR(rdev2->bdev->bd_dev); if (is_active) d->raid_disk = rdev2->raid_disk; else d->raid_disk = rdev2->desc_nr; /* compatibility */ if (test_bit(Faulty, &rdev2->flags)) d->state = (1<<MD_DISK_FAULTY); else if (is_active) { d->state = (1<<MD_DISK_ACTIVE); if (test_bit(In_sync, &rdev2->flags)) d->state |= (1<<MD_DISK_SYNC); active++; working++; } else { d->state = 0; spare++; working++; } if (test_bit(WriteMostly, &rdev2->flags)) d->state |= (1<<MD_DISK_WRITEMOSTLY); if (test_bit(FailFast, &rdev2->flags)) d->state |= (1<<MD_DISK_FAILFAST); } /* now set the "removed" and "faulty" bits on any missing devices */ for (i=0 ; i < mddev->raid_disks ; i++) { mdp_disk_t *d = &sb->disks[i]; if (d->state == 0 && d->number == 0) { d->number = i; d->raid_disk = i; d->state = (1<<MD_DISK_REMOVED); d->state |= (1<<MD_DISK_FAULTY); failed++; } } sb->nr_disks = nr_disks; sb->active_disks = active; sb->working_disks = working; sb->failed_disks = failed; sb->spare_disks = spare; sb->this_disk = sb->disks[rdev->desc_nr]; sb->sb_csum = calc_sb_csum(sb); } /* * rdev_size_change for 0.90.0 */ static unsigned long long super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) { if (num_sectors && num_sectors < rdev->mddev->dev_sectors) return 0; /* component must fit device */ if (rdev->mddev->bitmap_info.offset) return 0; /* can't move bitmap */ rdev->sb_start = calc_dev_sboffset(rdev); if (!num_sectors || num_sectors > rdev->sb_start) num_sectors = rdev->sb_start; /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. */ if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (sector_t)(2ULL << 32) - 2; do { md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); } while (md_super_wait(rdev->mddev) < 0); return num_sectors; } static int super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) { /* non-zero offset changes not possible with v0.90 */ return new_offset == 0; } /* * version 1 superblock */ static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) { __le32 disk_csum; u32 csum; unsigned long long newcsum; int size = 256 + le32_to_cpu(sb->max_dev)*2; __le32 *isuper = (__le32*)sb; disk_csum = sb->sb_csum; sb->sb_csum = 0; newcsum = 0; for (; size >= 4; size -= 4) newcsum += le32_to_cpu(*isuper++); if (size == 2) newcsum += le16_to_cpu(*(__le16*) isuper); csum = (newcsum & 0xffffffff) + (newcsum >> 32); sb->sb_csum = disk_csum; return cpu_to_le32(csum); } static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) { struct mdp_superblock_1 *sb; int ret; sector_t sb_start; sector_t sectors; int bmask; bool spare_disk = true; /* * Calculate the position of the superblock in 512byte sectors. * It is always aligned to a 4K boundary and * depeding on minor_version, it can be: * 0: At least 8K, but less than 12K, from end of device * 1: At start of device * 2: 4K from start of device. 
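/* Userspace sketch mirroring the switch just below: where a v1.x superblock
 * sits for each minor version, with everything expressed in 512-byte
 * sectors. This is an illustration only, not kernel code. */
#include <stdio.h>
#include <stdint.h>

static uint64_t v1_sb_start(uint64_t dev_sectors, int minor_version)
{
	uint64_t sb_start;

	switch (minor_version) {
	case 0:				/* at least 8K, less than 12K, from end */
		sb_start = dev_sectors - 8 * 2;
		sb_start &= ~(uint64_t)(4 * 2 - 1);	/* 4K aligned */
		return sb_start;
	case 1:				/* at the start of the device */
		return 0;
	case 2:				/* 4K from the start */
		return 8;
	default:
		return (uint64_t)-1;	/* unknown minor version */
	}
}

int main(void)
{
	uint64_t dev = 2097152;		/* example: 1 GiB device */
	int mv;

	for (mv = 0; mv <= 2; mv++)
		printf("minor %d -> sb_start %llu\n", mv,
		       (unsigned long long)v1_sb_start(dev, mv));
	return 0;
}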
*/ switch(minor_version) { case 0: sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2; sb_start &= ~(sector_t)(4*2-1); break; case 1: sb_start = 0; break; case 2: sb_start = 8; break; default: return -EINVAL; } rdev->sb_start = sb_start; /* superblock is rarely larger than 1K, but it can be larger, * and it is safe to read 4k, so we do that */ ret = read_disk_sb(rdev, 4096); if (ret) return ret; sb = page_address(rdev->sb_page); if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || sb->major_version != cpu_to_le32(1) || le32_to_cpu(sb->max_dev) > (4096-256)/2 || le64_to_cpu(sb->super_offset) != rdev->sb_start || (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) return -EINVAL; if (calc_sb_1_csum(sb) != sb->sb_csum) { pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev); return -EINVAL; } if (le64_to_cpu(sb->data_size) < 10) { pr_warn("md: data_size too small on %pg\n", rdev->bdev); return -EINVAL; } if (sb->pad0 || sb->pad3[0] || memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) /* Some padding is non-zero, might be a new feature */ return -EINVAL; rdev->preferred_minor = 0xffff; rdev->data_offset = le64_to_cpu(sb->data_offset); rdev->new_data_offset = rdev->data_offset; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; if (rdev->sb_size & bmask) rdev->sb_size = (rdev->sb_size | bmask) + 1; if (minor_version && rdev->data_offset < sb_start + (rdev->sb_size/512)) return -EINVAL; if (minor_version && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) return -EINVAL; rdev->desc_nr = le32_to_cpu(sb->dev_number); if (!rdev->bb_page) { rdev->bb_page = alloc_page(GFP_KERNEL); if (!rdev->bb_page) return -ENOMEM; } if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && rdev->badblocks.count == 0) { /* need to load the bad block list. * Currently we limit it to one page. 
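/* Sketch of the on-disk bad-block record layout the loop below decodes:
 * the low 10 bits hold the length and the upper bits the start sector,
 * both in bblog_shift units, and an all-ones word marks an unused slot.
 * The helper names (bb_encode/bb_decode) are hypothetical. */
#include <stdio.h>
#include <stdint.h>

static uint64_t bb_encode(uint64_t sector, unsigned int len, int shift)
{
	return ((sector >> shift) << 10) | ((len >> shift) & 0x3ff);
}

static void bb_decode(uint64_t bb, int shift,
		      uint64_t *sector, unsigned int *len)
{
	*len = (unsigned int)(bb & 0x3ff) << shift;
	*sector = (bb >> 10) << shift;
}

int main(void)
{
	uint64_t sector;
	unsigned int len;
	uint64_t bb = bb_encode(123456, 8, 0);

	bb_decode(bb, 0, &sector, &len);
	printf("bad range: start %llu, %u sectors (raw 0x%llx)\n",
	       (unsigned long long)sector, len, (unsigned long long)bb);
	return 0;
}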
*/ s32 offset; sector_t bb_sector; __le64 *bbp; int i; int sectors = le16_to_cpu(sb->bblog_size); if (sectors > (PAGE_SIZE / 512)) return -EINVAL; offset = le32_to_cpu(sb->bblog_offset); if (offset == 0) return -EINVAL; bb_sector = (long long)offset; if (!sync_page_io(rdev, bb_sector, sectors << 9, rdev->bb_page, REQ_OP_READ, true)) return -EIO; bbp = (__le64 *)page_address(rdev->bb_page); rdev->badblocks.shift = sb->bblog_shift; for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { u64 bb = le64_to_cpu(*bbp); int count = bb & (0x3ff); u64 sector = bb >> 10; sector <<= sb->bblog_shift; count <<= sb->bblog_shift; if (bb + 1 == 0) break; if (badblocks_set(&rdev->badblocks, sector, count, 1)) return -EINVAL; } } else if (sb->bblog_offset != 0) rdev->badblocks.shift = 0; if ((le32_to_cpu(sb->feature_map) & (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); rdev->ppl.size = le16_to_cpu(sb->ppl.size); rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; } if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && sb->level != 0) return -EINVAL; /* not spare disk */ if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) spare_disk = false; if (!refdev) { if (!spare_disk) ret = 1; else ret = 0; } else { __u64 ev1, ev2; struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || sb->level != refsb->level || sb->layout != refsb->layout || sb->chunksize != refsb->chunksize) { pr_warn("md: %pg has strangely different superblock to %pg\n", rdev->bdev, refdev->bdev); return -EINVAL; } ev1 = le64_to_cpu(sb->events); ev2 = le64_to_cpu(refsb->events); if (!spare_disk && ev1 > ev2) ret = 1; else ret = 0; } if (minor_version) sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; else sectors = rdev->sb_start; if (sectors < le64_to_cpu(sb->data_size)) return -EINVAL; rdev->sectors = le64_to_cpu(sb->data_size); return ret; } static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) { struct mdp_superblock_1 *sb = page_address(rdev->sb_page); __u64 ev1 = le64_to_cpu(sb->events); int role; rdev->raid_disk = -1; clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); if (mddev->raid_disks == 0) { mddev->major_version = 1; mddev->patch_version = 0; mddev->external = 0; mddev->chunk_sectors = le32_to_cpu(sb->chunksize); mddev->ctime = le64_to_cpu(sb->ctime); mddev->utime = le64_to_cpu(sb->utime); mddev->level = le32_to_cpu(sb->level); mddev->clevel[0] = 0; mddev->layout = le32_to_cpu(sb->layout); mddev->raid_disks = le32_to_cpu(sb->raid_disks); mddev->dev_sectors = le64_to_cpu(sb->size); mddev->events = ev1; mddev->bitmap_info.offset = 0; mddev->bitmap_info.space = 0; /* Default location for bitmap is 1K after superblock * using 3K - total of 4K */ mddev->bitmap_info.default_offset = 1024 >> 9; mddev->bitmap_info.default_space = (4096-1024) >> 9; mddev->reshape_backwards = 0; mddev->recovery_cp = le64_to_cpu(sb->resync_offset); memcpy(mddev->uuid, sb->set_uuid, 16); mddev->max_disks = (4096-256)/2; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && mddev->bitmap_info.file == NULL) { mddev->bitmap_info.offset = (__s32)le32_to_cpu(sb->bitmap_offset); /* Metadata doesn't record how much space is available. 
* For 1.0, we assume we can use up to the superblock * if before, else to 4K beyond superblock. * For others, assume no change is possible. */ if (mddev->minor_version > 0) mddev->bitmap_info.space = 0; else if (mddev->bitmap_info.offset > 0) mddev->bitmap_info.space = 8 - mddev->bitmap_info.offset; else mddev->bitmap_info.space = -mddev->bitmap_info.offset; } if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { mddev->reshape_position = le64_to_cpu(sb->reshape_position); mddev->delta_disks = le32_to_cpu(sb->delta_disks); mddev->new_level = le32_to_cpu(sb->new_level); mddev->new_layout = le32_to_cpu(sb->new_layout); mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); if (mddev->delta_disks < 0 || (mddev->delta_disks == 0 && (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_BACKWARDS))) mddev->reshape_backwards = 1; } else { mddev->reshape_position = MaxSector; mddev->delta_disks = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; } if (mddev->level == 0 && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) mddev->layout = -1; if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) set_bit(MD_HAS_JOURNAL, &mddev->flags); if (le32_to_cpu(sb->feature_map) & (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { if (le32_to_cpu(sb->feature_map) & (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) return -EINVAL; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && (le32_to_cpu(sb->feature_map) & MD_FEATURE_MULTIPLE_PPLS)) return -EINVAL; set_bit(MD_HAS_PPL, &mddev->flags); } } else if (mddev->pers == NULL) { /* Insist of good event counter while assembling, except for * spares (which don't need an event count). * Similar to mdadm, we allow event counter difference of 1 * from the freshest device. */ if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) if (ev1 + 1 < mddev->events) return -EINVAL; } else if (mddev->bitmap) { /* If adding to array with a bitmap, then we can accept an * older device, but not too old. */ if (ev1 < md_bitmap_events_cleared(mddev)) return 0; if (ev1 < mddev->events) set_bit(Bitmap_sync, &rdev->flags); } else { if (ev1 < mddev->events) /* just a hot-add of a new device, leave raid_disk at -1 */ return 0; } if (rdev->desc_nr < 0 || rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { role = MD_DISK_ROLE_SPARE; rdev->desc_nr = -1; } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) { /* * If we are assembling, and our event counter is smaller than the * highest event counter, we cannot trust our superblock about the role. * It could happen that our rdev was marked as Faulty, and all other * superblocks were updated with +1 event counter. * Then, before the next superblock update, which typically happens when * remove_and_add_spares() removes the device from the array, there was * a crash or reboot. * If we allow current rdev without consulting the freshest superblock, * we could cause data corruption. * Note that in this case our event counter is smaller by 1 than the * highest, otherwise, this rdev would not be allowed into array; * both kernel and mdadm allow event counter difference of 1. 
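/* Minimal sketch of the acceptance rule described above: while assembling,
 * a member is rejected only if its event counter lags the array's by more
 * than one (this mirrors the "ev1 + 1 < mddev->events" test used during
 * validation; the helper name is illustrative). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool event_count_acceptable(uint64_t dev_events, uint64_t array_events)
{
	return dev_events + 1 >= array_events;
}

int main(void)
{
	printf("%d %d %d\n",
	       event_count_acceptable(100, 100),	/* in sync      -> 1 */
	       event_count_acceptable(99, 100),		/* one behind   -> 1 */
	       event_count_acceptable(98, 100));	/* too far back -> 0 */
	return 0;
}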
*/ struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page); u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev); if (rdev->desc_nr >= freshest_max_dev) { /* this is unexpected, better not proceed */ pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n", mdname(mddev), rdev->bdev, rdev->desc_nr, freshest->bdev, freshest_max_dev); return -EUCLEAN; } role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]); pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n", mdname(mddev), rdev->bdev, role, role, freshest->bdev); } else { role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); } switch (role) { case MD_DISK_ROLE_SPARE: /* spare */ break; case MD_DISK_ROLE_FAULTY: /* faulty */ set_bit(Faulty, &rdev->flags); break; case MD_DISK_ROLE_JOURNAL: /* journal device */ if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { /* journal device without journal feature */ pr_warn("md: journal device provided without journal feature, ignoring the device\n"); return -EINVAL; } set_bit(Journal, &rdev->flags); rdev->journal_tail = le64_to_cpu(sb->journal_tail); rdev->raid_disk = 0; break; default: rdev->saved_raid_disk = role; if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) { rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_BITMAP)) rdev->saved_raid_disk = -1; } else { /* * If the array is FROZEN, then the device can't * be in_sync with rest of array. */ if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) set_bit(In_sync, &rdev->flags); } rdev->raid_disk = role; break; } if (sb->devflags & WriteMostly1) set_bit(WriteMostly, &rdev->flags); if (sb->devflags & FailFast1) set_bit(FailFast, &rdev->flags); if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) set_bit(Replacement, &rdev->flags); return 0; } static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) { struct mdp_superblock_1 *sb; struct md_rdev *rdev2; int max_dev, i; /* make rdev->sb match mddev and rdev data. 
*/ sb = page_address(rdev->sb_page); sb->feature_map = 0; sb->pad0 = 0; sb->recovery_offset = cpu_to_le64(0); memset(sb->pad3, 0, sizeof(sb->pad3)); sb->utime = cpu_to_le64((__u64)mddev->utime); sb->events = cpu_to_le64(mddev->events); if (mddev->in_sync) sb->resync_offset = cpu_to_le64(mddev->recovery_cp); else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) sb->resync_offset = cpu_to_le64(MaxSector); else sb->resync_offset = cpu_to_le64(0); sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->size = cpu_to_le64(mddev->dev_sectors); sb->chunksize = cpu_to_le32(mddev->chunk_sectors); sb->level = cpu_to_le32(mddev->level); sb->layout = cpu_to_le32(mddev->layout); if (test_bit(FailFast, &rdev->flags)) sb->devflags |= FailFast1; else sb->devflags &= ~FailFast1; if (test_bit(WriteMostly, &rdev->flags)) sb->devflags |= WriteMostly1; else sb->devflags &= ~WriteMostly1; sb->data_offset = cpu_to_le64(rdev->data_offset); sb->data_size = cpu_to_le64(rdev->sectors); if (mddev->bitmap && mddev->bitmap_info.file == NULL) { sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); } if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && !test_bit(In_sync, &rdev->flags)) { sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); sb->recovery_offset = cpu_to_le64(rdev->recovery_offset); if (rdev->saved_raid_disk >= 0 && mddev->bitmap) sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); } /* Note: recovery_offset and journal_tail share space */ if (test_bit(Journal, &rdev->flags)) sb->journal_tail = cpu_to_le64(rdev->journal_tail); if (test_bit(Replacement, &rdev->flags)) sb->feature_map |= cpu_to_le32(MD_FEATURE_REPLACEMENT); if (mddev->reshape_position != MaxSector) { sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); sb->reshape_position = cpu_to_le64(mddev->reshape_position); sb->new_layout = cpu_to_le32(mddev->new_layout); sb->delta_disks = cpu_to_le32(mddev->delta_disks); sb->new_level = cpu_to_le32(mddev->new_level); sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); if (mddev->delta_disks == 0 && mddev->reshape_backwards) sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); if (rdev->new_data_offset != rdev->data_offset) { sb->feature_map |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset - rdev->data_offset)); } } if (mddev_is_clustered(mddev)) sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); if (rdev->badblocks.count == 0) /* Nothing to do for bad blocks*/ ; else if (sb->bblog_offset == 0) /* Cannot record bad blocks on this device */ md_error(mddev, rdev); else { struct badblocks *bb = &rdev->badblocks; __le64 *bbp = (__le64 *)page_address(rdev->bb_page); u64 *p = bb->page; sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); if (bb->changed) { unsigned seq; retry: seq = read_seqbegin(&bb->lock); memset(bbp, 0xff, PAGE_SIZE); for (i = 0 ; i < bb->count ; i++) { u64 internal_bb = p[i]; u64 store_bb = ((BB_OFFSET(internal_bb) << 10) | BB_LEN(internal_bb)); bbp[i] = cpu_to_le64(store_bb); } bb->changed = 0; if (read_seqretry(&bb->lock, seq)) goto retry; bb->sector = (rdev->sb_start + (int)le32_to_cpu(sb->bblog_offset)); bb->size = le16_to_cpu(sb->bblog_size); } } max_dev = 0; rdev_for_each(rdev2, mddev) if (rdev2->desc_nr+1 > max_dev) max_dev = rdev2->desc_nr+1; if (max_dev > le32_to_cpu(sb->max_dev)) { int bmask; sb->max_dev = cpu_to_le32(max_dev); rdev->sb_size = 
max_dev * 2 + 256; bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; if (rdev->sb_size & bmask) rdev->sb_size = (rdev->sb_size | bmask) + 1; } else max_dev = le32_to_cpu(sb->max_dev); for (i=0; i<max_dev;i++) sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); if (test_bit(MD_HAS_PPL, &mddev->flags)) { if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) sb->feature_map |= cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); else sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); sb->ppl.size = cpu_to_le16(rdev->ppl.size); } rdev_for_each(rdev2, mddev) { i = rdev2->desc_nr; if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); else if (test_bit(In_sync, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else if (test_bit(Journal, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); else if (rdev2->raid_disk >= 0) sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); else sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); } sb->sb_csum = calc_sb_1_csum(sb); } static sector_t super_1_choose_bm_space(sector_t dev_size) { sector_t bm_space; /* if the device is bigger than 8Gig, save 64k for bitmap * usage, if bigger than 200Gig, save 128k */ if (dev_size < 64*2) bm_space = 0; else if (dev_size - 64*2 >= 200*1024*1024*2) bm_space = 128*2; else if (dev_size - 4*2 > 8*1024*1024*2) bm_space = 64*2; else bm_space = 4*2; return bm_space; } static unsigned long long super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) { struct mdp_superblock_1 *sb; sector_t max_sectors; if (num_sectors && num_sectors < rdev->mddev->dev_sectors) return 0; /* component must fit device */ if (rdev->data_offset != rdev->new_data_offset) return 0; /* too confusing */ if (rdev->sb_start < rdev->data_offset) { /* minor versions 1 and 2; superblock before data */ max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; if (!num_sectors || num_sectors > max_sectors) num_sectors = max_sectors; } else if (rdev->mddev->bitmap_info.offset) { /* minor version 0 with bitmap we can't move */ return 0; } else { /* minor version 0; superblock after data */ sector_t sb_start, bm_space; sector_t dev_size = bdev_nr_sectors(rdev->bdev); /* 8K is for superblock */ sb_start = dev_size - 8*2; sb_start &= ~(sector_t)(4*2 - 1); bm_space = super_1_choose_bm_space(dev_size); /* Space that can be used to store date needs to decrease * superblock bitmap space and bad block space(4K) */ max_sectors = sb_start - bm_space - 4*2; if (!num_sectors || num_sectors > max_sectors) num_sectors = max_sectors; rdev->sb_start = sb_start; } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); sb->super_offset = cpu_to_le64(rdev->sb_start); sb->sb_csum = calc_sb_1_csum(sb); do { md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); } while (md_super_wait(rdev->mddev) < 0); return num_sectors; } static int super_1_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) { /* All necessary checks on new >= old have been done */ if (new_offset >= rdev->data_offset) return 1; /* with 1.0 metadata, there is no metadata to tread on * so we can always move back */ if (rdev->mddev->minor_version == 0) return 1; /* otherwise we must be sure not to step on * any metadata, so stay: * 36K beyond start of superblock * beyond end of badblocks * beyond write-intent bitmap */ if 
(rdev->sb_start + (32+4)*2 > new_offset) return 0; if (!rdev->mddev->bitmap_info.file) { struct mddev *mddev = rdev->mddev; struct md_bitmap_stats stats; int err; err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); if (!err && rdev->sb_start + mddev->bitmap_info.offset + stats.file_pages * (PAGE_SIZE >> 9) > new_offset) return 0; } if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) return 0; return 1; } static struct super_type super_types[] = { [0] = { .name = "0.90.0", .owner = THIS_MODULE, .load_super = super_90_load, .validate_super = super_90_validate, .sync_super = super_90_sync, .rdev_size_change = super_90_rdev_size_change, .allow_new_offset = super_90_allow_new_offset, }, [1] = { .name = "md-1", .owner = THIS_MODULE, .load_super = super_1_load, .validate_super = super_1_validate, .sync_super = super_1_sync, .rdev_size_change = super_1_rdev_size_change, .allow_new_offset = super_1_allow_new_offset, }, }; static void sync_super(struct mddev *mddev, struct md_rdev *rdev) { if (mddev->sync_super) { mddev->sync_super(mddev, rdev); return; } BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); super_types[mddev->major_version].sync_super(mddev, rdev); } static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) { struct md_rdev *rdev, *rdev2; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev1) { if (test_bit(Faulty, &rdev->flags) || test_bit(Journal, &rdev->flags) || rdev->raid_disk == -1) continue; rdev_for_each_rcu(rdev2, mddev2) { if (test_bit(Faulty, &rdev2->flags) || test_bit(Journal, &rdev2->flags) || rdev2->raid_disk == -1) continue; if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { rcu_read_unlock(); return 1; } } } rcu_read_unlock(); return 0; } static LIST_HEAD(pending_raid_disks); /* * Try to register data integrity profile for an mddev * * This is called when an array is started and after a disk has been kicked * from the array. It only succeeds if all working and active component devices * are integrity capable with matching profiles. */ int md_integrity_register(struct mddev *mddev) { if (list_empty(&mddev->disks)) return 0; /* nothing to do */ if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk)) return 0; /* shouldn't register */ pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); if (bioset_integrity_create(&mddev->bio_set, BIO_POOL_SIZE) || (mddev->level != 1 && mddev->level != 10 && bioset_integrity_create(&mddev->io_clone_set, BIO_POOL_SIZE))) { /* * No need to handle the failure of bioset_integrity_create, * because the function is called by md_run() -> pers->run(), * md_run calls bioset_exit -> bioset_integrity_free in case * of failure case. */ pr_err("md: failed to create integrity pool for %s\n", mdname(mddev)); return -EINVAL; } return 0; } EXPORT_SYMBOL(md_integrity_register); static bool rdev_read_only(struct md_rdev *rdev) { return bdev_read_only(rdev->bdev) || (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev)); } static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) { char b[BDEVNAME_SIZE]; int err; /* prevent duplicates */ if (find_rdev(mddev, rdev->bdev->bd_dev)) return -EEXIST; if (rdev_read_only(rdev) && mddev->pers) return -EROFS; /* make sure rdev->sectors exceeds mddev->dev_sectors */ if (!test_bit(Journal, &rdev->flags) && rdev->sectors && (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { if (mddev->pers) { /* Cannot change size, so fail * If mddev->level <= 0, then we don't care * about aligning sizes (e.g. 
linear) */ if (mddev->level > 0) return -ENOSPC; } else mddev->dev_sectors = rdev->sectors; } /* Verify rdev->desc_nr is unique. * If it is -1, assign a free number, else * check number is not in use */ rcu_read_lock(); if (rdev->desc_nr < 0) { int choice = 0; if (mddev->pers) choice = mddev->raid_disks; while (md_find_rdev_nr_rcu(mddev, choice)) choice++; rdev->desc_nr = choice; } else { if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { rcu_read_unlock(); return -EBUSY; } } rcu_read_unlock(); if (!test_bit(Journal, &rdev->flags) && mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { pr_warn("md: %s: array is limited to %d devices\n", mdname(mddev), mddev->max_disks); return -EBUSY; } snprintf(b, sizeof(b), "%pg", rdev->bdev); strreplace(b, '/', '!'); rdev->mddev = mddev; pr_debug("md: bind<%s>\n", b); if (mddev->raid_disks) mddev_create_serial_pool(mddev, rdev); if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) goto fail; /* failure here is OK */ err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block"); rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); rdev->sysfs_unack_badblocks = sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); rdev->sysfs_badblocks = sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); list_add_rcu(&rdev->same_set, &mddev->disks); bd_link_disk_holder(rdev->bdev, mddev->gendisk); /* May as well allow recovery to be retried once */ mddev->recovery_disabled++; return 0; fail: pr_warn("md: failed to register dev-%s for %s\n", b, mdname(mddev)); mddev_destroy_serial_pool(mddev, rdev); return err; } void md_autodetect_dev(dev_t dev); /* just for claiming the bdev */ static struct md_rdev claim_rdev; static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) { pr_debug("md: export_rdev(%pg)\n", rdev->bdev); md_rdev_clear(rdev); #ifndef MODULE if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif fput(rdev->bdev_file); rdev->bdev = NULL; kobject_put(&rdev->kobj); } static void md_kick_rdev_from_array(struct md_rdev *rdev) { struct mddev *mddev = rdev->mddev; bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); list_del_rcu(&rdev->same_set); pr_debug("md: unbind<%pg>\n", rdev->bdev); mddev_destroy_serial_pool(rdev->mddev, rdev); WRITE_ONCE(rdev->mddev, NULL); sysfs_remove_link(&rdev->kobj, "block"); sysfs_put(rdev->sysfs_state); sysfs_put(rdev->sysfs_unack_badblocks); sysfs_put(rdev->sysfs_badblocks); rdev->sysfs_state = NULL; rdev->sysfs_unack_badblocks = NULL; rdev->sysfs_badblocks = NULL; rdev->badblocks.count = 0; synchronize_rcu(); /* * kobject_del() will wait for all in progress writers to be done, where * reconfig_mutex is held, hence it can't be called under * reconfig_mutex and it's delayed to mddev_unlock(). */ list_add(&rdev->same_set, &mddev->deleting); } static void export_array(struct mddev *mddev) { struct md_rdev *rdev; while (!list_empty(&mddev->disks)) { rdev = list_first_entry(&mddev->disks, struct md_rdev, same_set); md_kick_rdev_from_array(rdev); } mddev->raid_disks = 0; mddev->major_version = 0; } static bool set_in_sync(struct mddev *mddev) { lockdep_assert_held(&mddev->lock); if (!mddev->in_sync) { mddev->sync_checkers++; spin_unlock(&mddev->lock); percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); spin_lock(&mddev->lock); if (!mddev->in_sync && percpu_ref_is_zero(&mddev->writes_pending)) { mddev->in_sync = 1; /* * Ensure ->in_sync is visible before we clear * ->sync_checkers. 
*/ smp_mb(); set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); sysfs_notify_dirent_safe(mddev->sysfs_state); } if (--mddev->sync_checkers == 0) percpu_ref_switch_to_percpu(&mddev->writes_pending); } if (mddev->safemode == 1) mddev->safemode = 0; return mddev->in_sync; } static void sync_sbs(struct mddev *mddev, int nospares) { /* Update each superblock (in-memory image), but * if we are allowed to, skip spares which already * have the right event counter, or have one earlier * (which would mean they aren't being marked as dirty * with the rest of the array) */ struct md_rdev *rdev; rdev_for_each(rdev, mddev) { if (rdev->sb_events == mddev->events || (nospares && rdev->raid_disk < 0 && rdev->sb_events+1 == mddev->events)) { /* Don't update this superblock */ rdev->sb_loaded = 2; } else { sync_super(mddev, rdev); rdev->sb_loaded = 1; } } } static bool does_sb_need_changing(struct mddev *mddev) { struct md_rdev *rdev = NULL, *iter; struct mdp_superblock_1 *sb; int role; /* Find a good rdev */ rdev_for_each(iter, mddev) if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { rdev = iter; break; } /* No good device found. */ if (!rdev) return false; sb = page_address(rdev->sb_page); /* Check if a device has become faulty or a spare become active */ rdev_for_each(rdev, mddev) { role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); /* Device activated? */ if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) return true; /* Device turned faulty? */ if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX)) return true; } /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || (mddev->layout != le32_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) return true; return false; } void md_update_sb(struct mddev *mddev, int force_change) { struct md_rdev *rdev; int sync_req; int nospares = 0; int any_badblocks_changed = 0; int ret = -1; if (!md_is_rdwr(mddev)) { if (force_change) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); return; } repeat: if (mddev_is_clustered(mddev)) { if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) nospares = 1; ret = md_cluster_ops->metadata_update_start(mddev); /* Has someone else has updated the sb */ if (!does_sb_need_changing(mddev)) { if (ret == 0) md_cluster_ops->metadata_update_cancel(mddev); bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)); return; } } /* * First make sure individual recovery_offsets are correct * curr_resync_completed can only be used during recovery. * During reshape/resync it might use array-addresses rather * that device addresses. 
*/ rdev_for_each(rdev, mddev) { if (rdev->raid_disk >= 0 && mddev->delta_disks >= 0 && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && !test_bit(Journal, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && mddev->curr_resync_completed > rdev->recovery_offset) rdev->recovery_offset = mddev->curr_resync_completed; } if (!mddev->persistent) { clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->external) { clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) { rdev->badblocks.changed = 0; ack_all_badblocks(&rdev->badblocks); md_error(mddev, rdev); } clear_bit(Blocked, &rdev->flags); clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); } } wake_up(&mddev->sb_wait); return; } spin_lock(&mddev->lock); mddev->utime = ktime_get_real_seconds(); if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) /* just a clean<-> dirty transition, possibly leave spares alone, * though if events isn't the right even/odd, we will have to do * spares after all */ nospares = 1; if (force_change) nospares = 0; if (mddev->degraded) /* If the array is degraded, then skipping spares is both * dangerous and fairly pointless. * Dangerous because a device that was removed from the array * might have a event_count that still looks up-to-date, * so it can be re-added without a resync. * Pointless because if there are any spares to skip, * then a recovery will happen and soon that array won't * be degraded any more and the spare can go back to sleep then. */ nospares = 0; sync_req = mddev->in_sync; /* If this is just a dirty<->clean transition, and the array is clean * and 'events' is odd, we can roll back to the previous clean state */ if (nospares && (mddev->in_sync && mddev->recovery_cp == MaxSector) && mddev->can_decrease_events && mddev->events != 1) { mddev->events--; mddev->can_decrease_events = 0; } else { /* otherwise we have to go forward and ... */ mddev->events ++; mddev->can_decrease_events = nospares; } /* * This 64-bit counter should never wrap. * Either we are in around ~1 trillion A.C., assuming * 1 reboot per second, or we have a bug... 
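/* Quick back-of-the-envelope arithmetic behind the comment above: even at
 * one event per second, a 64-bit counter lasts far longer than the age of
 * the universe, so wrapping it would indicate a bug. */
#include <stdio.h>

int main(void)
{
	double seconds = 18446744073709551616.0;	/* 2^64 */
	double years = seconds / (365.25 * 24 * 3600);

	printf("2^64 events at 1/s ~= %.3g years\n", years);
	return 0;
}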
*/ WARN_ON(mddev->events == 0); rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) any_badblocks_changed++; if (test_bit(Faulty, &rdev->flags)) set_bit(FaultRecorded, &rdev->flags); } sync_sbs(mddev, nospares); spin_unlock(&mddev->lock); pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", mdname(mddev), mddev->in_sync); mddev_add_trace_msg(mddev, "md md_update_sb"); rewrite: mddev->bitmap_ops->update_sb(mddev->bitmap); rdev_for_each(rdev, mddev) { if (rdev->sb_loaded != 1) continue; /* no noise on spare devices */ if (!test_bit(Faulty, &rdev->flags)) { md_super_write(mddev,rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); pr_debug("md: (write) %pg's sb offset: %llu\n", rdev->bdev, (unsigned long long)rdev->sb_start); rdev->sb_events = mddev->events; if (rdev->badblocks.size) { md_super_write(mddev, rdev, rdev->badblocks.sector, rdev->badblocks.size << 9, rdev->bb_page); rdev->badblocks.size = 0; } } else pr_debug("md: %pg (skipping faulty)\n", rdev->bdev); } if (md_super_wait(mddev) < 0) goto rewrite; /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->metadata_update_finish(mddev); if (mddev->in_sync != sync_req || !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) /* have to write it out again */ goto repeat; wake_up(&mddev->sb_wait); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) sysfs_notify_dirent_safe(mddev->sysfs_completed); rdev_for_each(rdev, mddev) { if (test_and_clear_bit(FaultRecorded, &rdev->flags)) clear_bit(Blocked, &rdev->flags); if (any_badblocks_changed) ack_all_badblocks(&rdev->badblocks); clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); } } EXPORT_SYMBOL(md_update_sb); static int add_bound_rdev(struct md_rdev *rdev) { struct mddev *mddev = rdev->mddev; int err = 0; bool add_journal = test_bit(Journal, &rdev->flags); if (!mddev->pers->hot_remove_disk || add_journal) { /* If there is hot_add_disk but no hot_remove_disk * then added disks for geometry changes, * and should be added immediately. */ super_types[mddev->major_version]. validate_super(mddev, NULL/*freshest*/, rdev); err = mddev->pers->hot_add_disk(mddev, rdev); if (err) { md_kick_rdev_from_array(rdev); return err; } } sysfs_notify_dirent_safe(rdev->sysfs_state); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (mddev->degraded) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_new_event(); return 0; } /* words written to sysfs files may, or may not, be \n terminated. * We want to accept with case. For this we use cmd_match. */ static int cmd_match(const char *cmd, const char *str) { /* See if cmd, written into a sysfs file, matches * str. 
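/* Standalone copy of the matcher defined just below, with a small usage
 * example showing why a sysfs write of "faulty\n" and of "faulty" are
 * treated the same while a bare prefix is not. Illustration only. */
#include <stdio.h>

static int cmd_match(const char *cmd, const char *str)
{
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       cmd_match("faulty\n", "faulty"),	/* 1: trailing newline ok */
	       cmd_match("faulty", "faulty"),	/* 1: exact match         */
	       cmd_match("fault", "faulty"));	/* 0: prefix is not enough */
	return 0;
}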
They must either be the same, or cmd can * have a trailing newline */ while (*cmd && *str && *cmd == *str) { cmd++; str++; } if (*cmd == '\n') cmd++; if (*str || *cmd) return 0; return 1; } struct rdev_sysfs_entry { struct attribute attr; ssize_t (*show)(struct md_rdev *, char *); ssize_t (*store)(struct md_rdev *, const char *, size_t); }; static ssize_t state_show(struct md_rdev *rdev, char *page) { char *sep = ","; size_t len = 0; unsigned long flags = READ_ONCE(rdev->flags); if (test_bit(Faulty, &flags) || (!test_bit(ExternalBbl, &flags) && rdev->badblocks.unacked_exist)) len += sprintf(page+len, "faulty%s", sep); if (test_bit(In_sync, &flags)) len += sprintf(page+len, "in_sync%s", sep); if (test_bit(Journal, &flags)) len += sprintf(page+len, "journal%s", sep); if (test_bit(WriteMostly, &flags)) len += sprintf(page+len, "write_mostly%s", sep); if (test_bit(Blocked, &flags) || (rdev->badblocks.unacked_exist && !test_bit(Faulty, &flags))) len += sprintf(page+len, "blocked%s", sep); if (!test_bit(Faulty, &flags) && !test_bit(Journal, &flags) && !test_bit(In_sync, &flags)) len += sprintf(page+len, "spare%s", sep); if (test_bit(WriteErrorSeen, &flags)) len += sprintf(page+len, "write_error%s", sep); if (test_bit(WantReplacement, &flags)) len += sprintf(page+len, "want_replacement%s", sep); if (test_bit(Replacement, &flags)) len += sprintf(page+len, "replacement%s", sep); if (test_bit(ExternalBbl, &flags)) len += sprintf(page+len, "external_bbl%s", sep); if (test_bit(FailFast, &flags)) len += sprintf(page+len, "failfast%s", sep); if (len) len -= strlen(sep); return len+sprintf(page+len, "\n"); } static ssize_t state_store(struct md_rdev *rdev, const char *buf, size_t len) { /* can write * faulty - simulates an error * remove - disconnects the device * writemostly - sets write_mostly * -writemostly - clears write_mostly * blocked - sets the Blocked flags * -blocked - clears the Blocked and possibly simulates an error * insync - sets Insync providing device isn't active * -insync - clear Insync for a device with a slot assigned, * so that it gets rebuilt based on bitmap * write_error - sets WriteErrorSeen * -write_error - clears WriteErrorSeen * {,-}failfast - set/clear FailFast */ struct mddev *mddev = rdev->mddev; int err = -EINVAL; bool need_update_sb = false; if (cmd_match(buf, "faulty") && rdev->mddev->pers) { md_error(rdev->mddev, rdev); if (test_bit(MD_BROKEN, &rdev->mddev->flags)) err = -EBUSY; else err = 0; } else if (cmd_match(buf, "remove")) { if (rdev->mddev->pers) { clear_bit(Blocked, &rdev->flags); remove_and_add_spares(rdev->mddev, rdev); } if (rdev->raid_disk >= 0) err = -EBUSY; else { err = 0; if (mddev_is_clustered(mddev)) err = md_cluster_ops->remove_disk(mddev, rdev); if (err == 0) { md_kick_rdev_from_array(rdev); if (mddev->pers) set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_new_event(); } } } else if (cmd_match(buf, "writemostly")) { set_bit(WriteMostly, &rdev->flags); mddev_create_serial_pool(rdev->mddev, rdev); need_update_sb = true; err = 0; } else if (cmd_match(buf, "-writemostly")) { mddev_destroy_serial_pool(rdev->mddev, rdev); clear_bit(WriteMostly, &rdev->flags); need_update_sb = true; err = 0; } else if (cmd_match(buf, "blocked")) { set_bit(Blocked, &rdev->flags); err = 0; } else if (cmd_match(buf, "-blocked")) { if (!test_bit(Faulty, &rdev->flags) && !test_bit(ExternalBbl, &rdev->flags) && rdev->badblocks.unacked_exist) { /* metadata handler doesn't understand badblocks, * so we need to fail the device */ md_error(rdev->mddev, rdev); } clear_bit(Blocked, 
&rdev->flags); clear_bit(BlockedBadBlocks, &rdev->flags); wake_up(&rdev->blocked_wait); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); err = 0; } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { set_bit(In_sync, &rdev->flags); err = 0; } else if (cmd_match(buf, "failfast")) { set_bit(FailFast, &rdev->flags); need_update_sb = true; err = 0; } else if (cmd_match(buf, "-failfast")) { clear_bit(FailFast, &rdev->flags); need_update_sb = true; err = 0; } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags)) { if (rdev->mddev->pers == NULL) { clear_bit(In_sync, &rdev->flags); rdev->saved_raid_disk = rdev->raid_disk; rdev->raid_disk = -1; err = 0; } } else if (cmd_match(buf, "write_error")) { set_bit(WriteErrorSeen, &rdev->flags); err = 0; } else if (cmd_match(buf, "-write_error")) { clear_bit(WriteErrorSeen, &rdev->flags); err = 0; } else if (cmd_match(buf, "want_replacement")) { /* Any non-spare device that is not a replacement can * become want_replacement at any time, but we then need to * check if recovery is needed. */ if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && !test_bit(Replacement, &rdev->flags)) set_bit(WantReplacement, &rdev->flags); set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); err = 0; } else if (cmd_match(buf, "-want_replacement")) { /* Clearing 'want_replacement' is always allowed. * Once replacements starts it is too late though. */ err = 0; clear_bit(WantReplacement, &rdev->flags); } else if (cmd_match(buf, "replacement")) { /* Can only set a device as a replacement when array has not * yet been started. Once running, replacement is automatic * from spares, or by assigning 'slot'. */ if (rdev->mddev->pers) err = -EBUSY; else { set_bit(Replacement, &rdev->flags); err = 0; } } else if (cmd_match(buf, "-replacement")) { /* Similarly, can only clear Replacement before start */ if (rdev->mddev->pers) err = -EBUSY; else { clear_bit(Replacement, &rdev->flags); err = 0; } } else if (cmd_match(buf, "re-add")) { if (!rdev->mddev->pers) err = -EINVAL; else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && rdev->saved_raid_disk >= 0) { /* clear_bit is performed _after_ all the devices * have their local Faulty bit cleared. If any writes * happen in the meantime in the local node, they * will land in the local bitmap, which will be synced * by this node eventually */ if (!mddev_is_clustered(rdev->mddev) || (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) { clear_bit(Faulty, &rdev->flags); err = add_bound_rdev(rdev); } } else err = -EBUSY; } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { set_bit(ExternalBbl, &rdev->flags); rdev->badblocks.shift = 0; err = 0; } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { clear_bit(ExternalBbl, &rdev->flags); err = 0; } if (need_update_sb) md_update_sb(mddev, 1); if (!err) sysfs_notify_dirent_safe(rdev->sysfs_state); return err ? 
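/*
 * Minimal user-space sketch of driving state_store() above through sysfs.
 * The attribute path layout (/sys/block/md0/md/dev-sda/state) and the
 * device names are assumptions used for illustration only, and the block
 * is kept under "#if 0" so it is never built as part of this file.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* write one of the keywords accepted by state_store(), e.g. "writemostly" */
static int set_rdev_state(const char *path, const char *cmd)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, cmd, strlen(cmd));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* hypothetical member of /dev/md0 */
	return set_rdev_state("/sys/block/md0/md/dev-sda/state", "writemostly");
}
#endif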
err : len; } static struct rdev_sysfs_entry rdev_state = __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); static ssize_t errors_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); } static ssize_t errors_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned int n; int rv; rv = kstrtouint(buf, 10, &n); if (rv < 0) return rv; atomic_set(&rdev->corrected_errors, n); return len; } static struct rdev_sysfs_entry rdev_errors = __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); static ssize_t slot_show(struct md_rdev *rdev, char *page) { if (test_bit(Journal, &rdev->flags)) return sprintf(page, "journal\n"); else if (rdev->raid_disk < 0) return sprintf(page, "none\n"); else return sprintf(page, "%d\n", rdev->raid_disk); } static ssize_t slot_store(struct md_rdev *rdev, const char *buf, size_t len) { int slot; int err; if (test_bit(Journal, &rdev->flags)) return -EBUSY; if (strncmp(buf, "none", 4)==0) slot = -1; else { err = kstrtouint(buf, 10, (unsigned int *)&slot); if (err < 0) return err; if (slot < 0) /* overflow */ return -ENOSPC; } if (rdev->mddev->pers && slot == -1) { /* Setting 'slot' on an active array requires also * updating the 'rd%d' link, and communicating * with the personality with ->hot_*_disk. * For now we only support removing * failed/spare devices. This normally happens automatically, * but not when the metadata is externally managed. */ if (rdev->raid_disk == -1) return -EEXIST; /* personality does all needed checks */ if (rdev->mddev->pers->hot_remove_disk == NULL) return -EINVAL; clear_bit(Blocked, &rdev->flags); remove_and_add_spares(rdev->mddev, rdev); if (rdev->raid_disk >= 0) return -EBUSY; set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); } else if (rdev->mddev->pers) { /* Activating a spare .. or possibly reactivating * if we ever get bitmaps working here. */ int err; if (rdev->raid_disk != -1) return -EBUSY; if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) return -EBUSY; if (rdev->mddev->pers->hot_add_disk == NULL) return -EINVAL; if (slot >= rdev->mddev->raid_disks && slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) return -ENOSPC; rdev->raid_disk = slot; if (test_bit(In_sync, &rdev->flags)) rdev->saved_raid_disk = slot; else rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); clear_bit(Bitmap_sync, &rdev->flags); err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); if (err) { rdev->raid_disk = -1; return err; } else sysfs_notify_dirent_safe(rdev->sysfs_state); /* failure here is OK */; sysfs_link_rdev(rdev->mddev, rdev); /* don't wakeup anyone, leave that to userspace. 
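 * A rough summary of what slot_store() accepts, as implemented in the
 * branches above and below (simplified, illustrative only):
 *
 *	"none"	active array:	detach a failed/spare member (raid_disk -> -1)
 *		inactive array:	record raid_disk = -1 and reset the flags
 *	"<N>"	active array:	hot-add this device into slot N as a spare
 *		inactive array:	mark the device In_sync in slot N
 *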
*/ } else { if (slot >= rdev->mddev->raid_disks && slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) return -ENOSPC; rdev->raid_disk = slot; /* assume it is working */ clear_bit(Faulty, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); set_bit(In_sync, &rdev->flags); sysfs_notify_dirent_safe(rdev->sysfs_state); } return len; } static struct rdev_sysfs_entry rdev_slot = __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); static ssize_t offset_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); } static ssize_t offset_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned long long offset; if (kstrtoull(buf, 10, &offset) < 0) return -EINVAL; if (rdev->mddev->pers && rdev->raid_disk >= 0) return -EBUSY; if (rdev->sectors && rdev->mddev->external) /* Must set offset before size, so overlap checks * can be sane */ return -EBUSY; rdev->data_offset = offset; rdev->new_data_offset = offset; return len; } static struct rdev_sysfs_entry rdev_offset = __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); static ssize_t new_offset_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)rdev->new_data_offset); } static ssize_t new_offset_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned long long new_offset; struct mddev *mddev = rdev->mddev; if (kstrtoull(buf, 10, &new_offset) < 0) return -EINVAL; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; if (new_offset == rdev->data_offset) /* reset is always permitted */ ; else if (new_offset > rdev->data_offset) { /* must not push array size beyond rdev_sectors */ if (new_offset - rdev->data_offset + mddev->dev_sectors > rdev->sectors) return -E2BIG; } /* Metadata worries about other space details. */ /* decreasing the offset is inconsistent with a backwards * reshape. */ if (new_offset < rdev->data_offset && mddev->reshape_backwards) return -EINVAL; /* Increasing offset is inconsistent with forwards * reshape. reshape_direction should be set to * 'backwards' first. 
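 *
 * Worked example (offsets illustrative): with data_offset == 2048,
 * writing 4096 to new_offset is rejected with -EINVAL until
 * reshape_direction has been switched to "backwards", while writing
 * 1024 requires the default "forwards" direction; growing the offset
 * is additionally subject to the earlier -E2BIG check that dev_sectors
 * still fits inside rdev->sectors at the new offset.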
*/ if (new_offset > rdev->data_offset && !mddev->reshape_backwards) return -EINVAL; if (mddev->pers && mddev->persistent && !super_types[mddev->major_version] .allow_new_offset(rdev, new_offset)) return -E2BIG; rdev->new_data_offset = new_offset; if (new_offset > rdev->data_offset) mddev->reshape_backwards = 1; else if (new_offset < rdev->data_offset) mddev->reshape_backwards = 0; return len; } static struct rdev_sysfs_entry rdev_new_offset = __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); static ssize_t rdev_size_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); } static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b) { /* check if two start/length pairs overlap */ if (a->data_offset + a->sectors <= b->data_offset) return false; if (b->data_offset + b->sectors <= a->data_offset) return false; return true; } static bool md_rdev_overlaps(struct md_rdev *rdev) { struct mddev *mddev; struct md_rdev *rdev2; spin_lock(&all_mddevs_lock); list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (test_bit(MD_DELETED, &mddev->flags)) continue; rdev_for_each(rdev2, mddev) { if (rdev != rdev2 && rdev->bdev == rdev2->bdev && md_rdevs_overlap(rdev, rdev2)) { spin_unlock(&all_mddevs_lock); return true; } } } spin_unlock(&all_mddevs_lock); return false; } static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) { unsigned long long blocks; sector_t new; if (kstrtoull(buf, 10, &blocks) < 0) return -EINVAL; if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) return -EINVAL; /* sector conversion overflow */ new = blocks * 2; if (new != blocks * 2) return -EINVAL; /* unsigned long long to sector_t overflow */ *sectors = new; return 0; } static ssize_t rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) { struct mddev *my_mddev = rdev->mddev; sector_t oldsectors = rdev->sectors; sector_t sectors; if (test_bit(Journal, &rdev->flags)) return -EBUSY; if (strict_blocks_to_sectors(buf, &sectors) < 0) return -EINVAL; if (rdev->data_offset != rdev->new_data_offset) return -EINVAL; /* too confusing */ if (my_mddev->pers && rdev->raid_disk >= 0) { if (my_mddev->persistent) { sectors = super_types[my_mddev->major_version]. rdev_size_change(rdev, sectors); if (!sectors) return -EBUSY; } else if (!sectors) sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; if (!my_mddev->pers->resize) /* Cannot change size for RAID0 or Linear etc */ return -EINVAL; } if (sectors < my_mddev->dev_sectors) return -EINVAL; /* component must fit device */ rdev->sectors = sectors; /* * Check that all other rdevs with the same bdev do not overlap. This * check does not provide a hard guarantee, it just helps avoid * dangerous mistakes. */ if (sectors > oldsectors && my_mddev->external && md_rdev_overlaps(rdev)) { /* * Someone else could have slipped in a size change here, but * doing so is just silly. We put oldsectors back because we * know it is safe, and trust userspace not to race with itself. 
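 *
 * The overlap test itself (md_rdevs_overlap() above) is a plain
 * half-open interval check; with made-up numbers:
 *
 *	a: data_offset    0, sectors 1000  ->  [0, 1000)
 *	b: data_offset 1000, sectors 1000  ->  [1000, 2000)  no overlap with a
 *	c: data_offset  500, sectors 1000  ->  [500, 1500)   overlaps a and b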
*/ rdev->sectors = oldsectors; return -EBUSY; } return len; } static struct rdev_sysfs_entry rdev_size = __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) { unsigned long long recovery_start = rdev->recovery_offset; if (test_bit(In_sync, &rdev->flags) || recovery_start == MaxSector) return sprintf(page, "none\n"); return sprintf(page, "%llu\n", recovery_start); } static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned long long recovery_start; if (cmd_match(buf, "none")) recovery_start = MaxSector; else if (kstrtoull(buf, 10, &recovery_start)) return -EINVAL; if (rdev->mddev->pers && rdev->raid_disk >= 0) return -EBUSY; rdev->recovery_offset = recovery_start; if (recovery_start == MaxSector) set_bit(In_sync, &rdev->flags); else clear_bit(In_sync, &rdev->flags); return len; } static struct rdev_sysfs_entry rdev_recovery_start = __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); /* sysfs access to bad-blocks list. * We present two files. * 'bad-blocks' lists sector numbers and lengths of ranges that * are recorded as bad. The list is truncated to fit within * the one-page limit of sysfs. * Writing "sector length" to this file adds an acknowledged * bad block list. * 'unacknowledged-bad-blocks' lists bad blocks that have not yet * been acknowledged. Writing to this file adds bad blocks * without acknowledging them. This is largely for testing. */ static ssize_t bb_show(struct md_rdev *rdev, char *page) { return badblocks_show(&rdev->badblocks, page, 0); } static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) { int rv = badblocks_store(&rdev->badblocks, page, len, 0); /* Maybe that ack was all we needed */ if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) wake_up(&rdev->blocked_wait); return rv; } static struct rdev_sysfs_entry rdev_bad_blocks = __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); static ssize_t ubb_show(struct md_rdev *rdev, char *page) { return badblocks_show(&rdev->badblocks, page, 1); } static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) { return badblocks_store(&rdev->badblocks, page, len, 1); } static struct rdev_sysfs_entry rdev_unack_bad_blocks = __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); static ssize_t ppl_sector_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); } static ssize_t ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned long long sector; if (kstrtoull(buf, 10, &sector) < 0) return -EINVAL; if (sector != (sector_t)sector) return -EINVAL; if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && rdev->raid_disk >= 0) return -EBUSY; if (rdev->mddev->persistent) { if (rdev->mddev->major_version == 0) return -EINVAL; if ((sector > rdev->sb_start && sector - rdev->sb_start > S16_MAX) || (sector < rdev->sb_start && rdev->sb_start - sector > -S16_MIN)) return -EINVAL; rdev->ppl.offset = sector - rdev->sb_start; } else if (!rdev->mddev->external) { return -EBUSY; } rdev->ppl.sector = sector; return len; } static struct rdev_sysfs_entry rdev_ppl_sector = __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); static ssize_t ppl_size_show(struct md_rdev *rdev, char *page) { return sprintf(page, "%u\n", rdev->ppl.size); } static ssize_t ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) { unsigned int 
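/*
 * For persistent (non-0.90) metadata the PPL location is kept as a signed
 * 16-bit sector offset from the superblock, which is what the range check
 * in ppl_sector_store() above enforces.  A stand-alone sketch of the same
 * test; the helper name and the user-space types are assumptions, and the
 * block is under "#if 0" so it is never compiled here:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool ppl_sector_in_range(uint64_t sb_start, uint64_t sector)
{
	/* ppl.offset = sector - sb_start must fit in an int16_t */
	if (sector >= sb_start)
		return sector - sb_start <= INT16_MAX;
	return sb_start - sector <= (uint64_t)INT16_MAX + 1;
}
#endif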
size; if (kstrtouint(buf, 10, &size) < 0) return -EINVAL; if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && rdev->raid_disk >= 0) return -EBUSY; if (rdev->mddev->persistent) { if (rdev->mddev->major_version == 0) return -EINVAL; if (size > U16_MAX) return -EINVAL; } else if (!rdev->mddev->external) { return -EBUSY; } rdev->ppl.size = size; return len; } static struct rdev_sysfs_entry rdev_ppl_size = __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); static struct attribute *rdev_default_attrs[] = { &rdev_state.attr, &rdev_errors.attr, &rdev_slot.attr, &rdev_offset.attr, &rdev_new_offset.attr, &rdev_size.attr, &rdev_recovery_start.attr, &rdev_bad_blocks.attr, &rdev_unack_bad_blocks.attr, &rdev_ppl_sector.attr, &rdev_ppl_size.attr, NULL, }; ATTRIBUTE_GROUPS(rdev_default); static ssize_t rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); if (!entry->show) return -EIO; if (!rdev->mddev) return -ENODEV; return entry->show(rdev, page); } static ssize_t rdev_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); struct kernfs_node *kn = NULL; bool suspend = false; ssize_t rv; struct mddev *mddev = READ_ONCE(rdev->mddev); if (!entry->store) return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (!mddev) return -ENODEV; if (entry->store == state_store) { if (cmd_match(page, "remove")) kn = sysfs_break_active_protection(kobj, attr); if (cmd_match(page, "remove") || cmd_match(page, "re-add") || cmd_match(page, "writemostly") || cmd_match(page, "-writemostly")) suspend = true; } rv = suspend ? mddev_suspend_and_lock(mddev) : mddev_lock(mddev); if (!rv) { if (rdev->mddev == NULL) rv = -ENODEV; else rv = entry->store(rdev, page, length); suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev); } if (kn) sysfs_unbreak_active_protection(kn); return rv; } static void rdev_free(struct kobject *ko) { struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); kfree(rdev); } static const struct sysfs_ops rdev_sysfs_ops = { .show = rdev_attr_show, .store = rdev_attr_store, }; static const struct kobj_type rdev_ktype = { .release = rdev_free, .sysfs_ops = &rdev_sysfs_ops, .default_groups = rdev_default_groups, }; int md_rdev_init(struct md_rdev *rdev) { rdev->desc_nr = -1; rdev->saved_raid_disk = -1; rdev->raid_disk = -1; rdev->flags = 0; rdev->data_offset = 0; rdev->new_data_offset = 0; rdev->sb_events = 0; rdev->last_read_error = 0; rdev->sb_loaded = 0; rdev->bb_page = NULL; atomic_set(&rdev->nr_pending, 0); atomic_set(&rdev->read_errors, 0); atomic_set(&rdev->corrected_errors, 0); INIT_LIST_HEAD(&rdev->same_set); init_waitqueue_head(&rdev->blocked_wait); /* Add space to store bad block list. * This reserves the space even on arrays where it cannot * be used - I wonder if that matters */ return badblocks_init(&rdev->badblocks, 0); } EXPORT_SYMBOL_GPL(md_rdev_init); /* * Import a device. If 'super_format' >= 0, then sanity check the superblock * * mark the device faulty if: * * - the device is nonexistent (zero size) * - the device has no valid superblock * * a faulty rdev _never_ has rdev->sb set. 
*/ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) { struct md_rdev *rdev; sector_t size; int err; rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) return ERR_PTR(-ENOMEM); err = md_rdev_init(rdev); if (err) goto out_free_rdev; err = alloc_disk_sb(rdev); if (err) goto out_clear_rdev; rdev->bdev_file = bdev_file_open_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, super_format == -2 ? &claim_rdev : rdev, NULL); if (IS_ERR(rdev->bdev_file)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); err = PTR_ERR(rdev->bdev_file); goto out_clear_rdev; } rdev->bdev = file_bdev(rdev->bdev_file); kobject_init(&rdev->kobj, &rdev_ktype); size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS; if (!size) { pr_warn("md: %pg has zero or unknown size, marking faulty!\n", rdev->bdev); err = -EINVAL; goto out_blkdev_put; } if (super_format >= 0) { err = super_types[super_format]. load_super(rdev, NULL, super_minor); if (err == -EINVAL) { pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n", rdev->bdev, super_format, super_minor); goto out_blkdev_put; } if (err < 0) { pr_warn("md: could not read %pg's sb, not importing!\n", rdev->bdev); goto out_blkdev_put; } } return rdev; out_blkdev_put: fput(rdev->bdev_file); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: kfree(rdev); return ERR_PTR(err); } /* * Check a full RAID array for plausibility */ static int analyze_sbs(struct mddev *mddev) { int i; struct md_rdev *rdev, *freshest, *tmp; freshest = NULL; rdev_for_each_safe(rdev, tmp, mddev) switch (super_types[mddev->major_version]. load_super(rdev, freshest, mddev->minor_version)) { case 1: freshest = rdev; break; case 0: break; default: pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n", rdev->bdev); md_kick_rdev_from_array(rdev); } /* Cannot find a valid fresh disk */ if (!freshest) { pr_warn("md: cannot find a valid disk\n"); return -EINVAL; } super_types[mddev->major_version]. validate_super(mddev, NULL/*freshest*/, freshest); i = 0; rdev_for_each_safe(rdev, tmp, mddev) { if (mddev->max_disks && (rdev->desc_nr >= mddev->max_disks || i > mddev->max_disks)) { pr_warn("md: %s: %pg: only %d devices permitted\n", mdname(mddev), rdev->bdev, mddev->max_disks); md_kick_rdev_from_array(rdev); continue; } if (rdev != freshest) { if (super_types[mddev->major_version]. validate_super(mddev, freshest, rdev)) { pr_warn("md: kicking non-fresh %pg from array!\n", rdev->bdev); md_kick_rdev_from_array(rdev); continue; } } if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks)) && !test_bit(Journal, &rdev->flags)) { rdev->raid_disk = -1; clear_bit(In_sync, &rdev->flags); } } return 0; } /* Read a fixed-point number. * Numbers in sysfs attributes should be in "standard" units where * possible, so time should be in seconds. * However we internally use a a much smaller unit such as * milliseconds or jiffies. * This function takes a decimal number with a possible fractional * component, and produces an integer which is the result of * multiplying that number by 10^'scale'. * all without any floating-point arithmetic. */ int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) { unsigned long result = 0; long decimals = -1; while (isdigit(*cp) || (*cp == '.' 
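/*
 * Worked examples for strict_strtoul_scaled() with scale == 3 (a result
 * in thousandths, as used by safe_delay_store() below); the inputs are
 * illustrative:
 *
 *	"1.5"    -> 1500
 *	"0.050"  ->   50
 *	"2"      -> 2000
 *	"1.2345" -> 1234	(digits beyond the scale are dropped)
 */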
&& decimals < 0)) { if (*cp == '.') decimals = 0; else if (decimals < scale) { unsigned int value; value = *cp - '0'; result = result * 10 + value; if (decimals >= 0) decimals++; } cp++; } if (*cp == '\n') cp++; if (*cp) return -EINVAL; if (decimals < 0) decimals = 0; *res = result * int_pow(10, scale - decimals); return 0; } static ssize_t safe_delay_show(struct mddev *mddev, char *page) { unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ; return sprintf(page, "%u.%03u\n", msec/1000, msec%1000); } static ssize_t safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) { unsigned long msec; if (mddev_is_clustered(mddev)) { pr_warn("md: Safemode is disabled for clustered mode\n"); return -EINVAL; } if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ) return -EINVAL; if (msec == 0) mddev->safemode_delay = 0; else { unsigned long old_delay = mddev->safemode_delay; unsigned long new_delay = (msec*HZ)/1000; if (new_delay == 0) new_delay = 1; mddev->safemode_delay = new_delay; if (new_delay < old_delay || old_delay == 0) mod_timer(&mddev->safemode_timer, jiffies+1); } return len; } static struct md_sysfs_entry md_safe_delay = __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); static ssize_t level_show(struct mddev *mddev, char *page) { struct md_personality *p; int ret; spin_lock(&mddev->lock); p = mddev->pers; if (p) ret = sprintf(page, "%s\n", p->name); else if (mddev->clevel[0]) ret = sprintf(page, "%s\n", mddev->clevel); else if (mddev->level != LEVEL_NONE) ret = sprintf(page, "%d\n", mddev->level); else ret = 0; spin_unlock(&mddev->lock); return ret; } static ssize_t level_store(struct mddev *mddev, const char *buf, size_t len) { char clevel[16]; ssize_t rv; size_t slen = len; struct md_personality *pers, *oldpers; long level; void *priv, *oldpriv; struct md_rdev *rdev; if (slen == 0 || slen >= sizeof(clevel)) return -EINVAL; rv = mddev_suspend_and_lock(mddev); if (rv) return rv; if (mddev->pers == NULL) { memcpy(mddev->clevel, buf, slen); if (mddev->clevel[slen-1] == '\n') slen--; mddev->clevel[slen] = 0; mddev->level = LEVEL_NONE; rv = len; goto out_unlock; } rv = -EROFS; if (!md_is_rdwr(mddev)) goto out_unlock; /* request to change the personality. Need to ensure: * - array is not engaged in resync/recovery/reshape * - old personality can be suspended * - new personality will access other array. */ rv = -EBUSY; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || mddev->reshape_position != MaxSector || mddev->sysfs_active) goto out_unlock; rv = -EINVAL; if (!mddev->pers->quiesce) { pr_warn("md: %s: %s does not support online personality change\n", mdname(mddev), mddev->pers->name); goto out_unlock; } /* Now find the new personality */ memcpy(clevel, buf, slen); if (clevel[slen-1] == '\n') slen--; clevel[slen] = 0; if (kstrtol(clevel, 10, &level)) level = LEVEL_NONE; if (request_module("md-%s", clevel) != 0) request_module("md-level-%s", clevel); spin_lock(&pers_lock); pers = find_pers(level, clevel); if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); pr_warn("md: personality %s not loaded\n", clevel); rv = -EINVAL; goto out_unlock; } spin_unlock(&pers_lock); if (pers == mddev->pers) { /* Nothing to do! 
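 * - the requested personality is already active, so only the module
 * reference taken above needs dropping.  For an actual change, the
 * rough sequence implemented below is: detach the old personality,
 * swap ->pers/->private and the geometry fields under mddev->lock,
 * re-point the rd%d sysfs links at the new raid_disk numbering, then
 * ->run() the new personality and schedule a superblock update.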
*/ module_put(pers->owner); rv = len; goto out_unlock; } if (!pers->takeover) { module_put(pers->owner); pr_warn("md: %s: %s does not support personality takeover\n", mdname(mddev), clevel); rv = -EINVAL; goto out_unlock; } rdev_for_each(rdev, mddev) rdev->new_raid_disk = rdev->raid_disk; /* ->takeover must set new_* and/or delta_disks * if it succeeds, and may set them when it fails. */ priv = pers->takeover(mddev); if (IS_ERR(priv)) { mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->raid_disks -= mddev->delta_disks; mddev->delta_disks = 0; mddev->reshape_backwards = 0; module_put(pers->owner); pr_warn("md: %s: %s would not accept array\n", mdname(mddev), clevel); rv = PTR_ERR(priv); goto out_unlock; } /* Looks like we have a winner */ mddev_detach(mddev); spin_lock(&mddev->lock); oldpers = mddev->pers; oldpriv = mddev->private; mddev->pers = pers; mddev->private = priv; strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev->level = mddev->new_level; mddev->layout = mddev->new_layout; mddev->chunk_sectors = mddev->new_chunk_sectors; mddev->delta_disks = 0; mddev->reshape_backwards = 0; mddev->degraded = 0; spin_unlock(&mddev->lock); if (oldpers->sync_request == NULL && mddev->external) { /* We are converting from a no-redundancy array * to a redundancy array and metadata is managed * externally so we need to be sure that writes * won't block due to a need to transition * clean->dirty * until external management is started. */ mddev->in_sync = 0; mddev->safemode_delay = 0; mddev->safemode = 0; } oldpers->free(mddev, oldpriv); if (oldpers->sync_request == NULL && pers->sync_request != NULL) { /* need to add the md_redundancy_group */ if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) pr_warn("md: cannot register extra attributes for %s\n", mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); } if (oldpers->sync_request != NULL && pers->sync_request == NULL) { /* need to remove the md_redundancy_group */ if (mddev->to_remove == NULL) mddev->to_remove = &md_redundancy_group; } module_put(oldpers->owner); rdev_for_each(rdev, mddev) { if (rdev->raid_disk < 0) continue; if (rdev->new_raid_disk >= mddev->raid_disks) rdev->new_raid_disk = -1; if (rdev->new_raid_disk == rdev->raid_disk) continue; sysfs_unlink_rdev(mddev, rdev); } rdev_for_each(rdev, mddev) { if (rdev->raid_disk < 0) continue; if (rdev->new_raid_disk == rdev->raid_disk) continue; rdev->raid_disk = rdev->new_raid_disk; if (rdev->raid_disk < 0) clear_bit(In_sync, &rdev->flags); else { if (sysfs_link_rdev(mddev, rdev)) pr_warn("md: cannot register rd%d for %s after level change\n", rdev->raid_disk, mdname(mddev)); } } if (pers->sync_request == NULL) { /* this is now an array without redundancy, so * it must always be in_sync */ mddev->in_sync = 1; del_timer_sync(&mddev->safemode_timer); } pers->run(mddev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); sysfs_notify_dirent_safe(mddev->sysfs_level); md_new_event(); rv = len; out_unlock: mddev_unlock_and_resume(mddev); return rv; } static struct md_sysfs_entry md_level = __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); static ssize_t new_level_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->new_level); } static ssize_t 
new_level_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int n; int err; err = kstrtouint(buf, 10, &n); if (err < 0) return err; err = mddev_lock(mddev); if (err) return err; mddev->new_level = n; md_update_sb(mddev, 1); mddev_unlock(mddev); return len; } static struct md_sysfs_entry md_new_level = __ATTR(new_level, 0664, new_level_show, new_level_store); static ssize_t layout_show(struct mddev *mddev, char *page) { /* just a number, not meaningful for all levels */ if (mddev->reshape_position != MaxSector && mddev->layout != mddev->new_layout) return sprintf(page, "%d (%d)\n", mddev->new_layout, mddev->layout); return sprintf(page, "%d\n", mddev->layout); } static ssize_t layout_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int n; int err; err = kstrtouint(buf, 10, &n); if (err < 0) return err; err = mddev_lock(mddev); if (err) return err; if (mddev->pers) { if (mddev->pers->check_reshape == NULL) err = -EBUSY; else if (!md_is_rdwr(mddev)) err = -EROFS; else { mddev->new_layout = n; err = mddev->pers->check_reshape(mddev); if (err) mddev->new_layout = mddev->layout; } } else { mddev->new_layout = n; if (mddev->reshape_position == MaxSector) mddev->layout = n; } mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_layout = __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); static ssize_t raid_disks_show(struct mddev *mddev, char *page) { if (mddev->raid_disks == 0) return 0; if (mddev->reshape_position != MaxSector && mddev->delta_disks != 0) return sprintf(page, "%d (%d)\n", mddev->raid_disks, mddev->raid_disks - mddev->delta_disks); return sprintf(page, "%d\n", mddev->raid_disks); } static int update_raid_disks(struct mddev *mddev, int raid_disks); static ssize_t raid_disks_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int n; int err; err = kstrtouint(buf, 10, &n); if (err < 0) return err; err = mddev_lock(mddev); if (err) return err; if (mddev->pers) err = update_raid_disks(mddev, n); else if (mddev->reshape_position != MaxSector) { struct md_rdev *rdev; int olddisks = mddev->raid_disks - mddev->delta_disks; err = -EINVAL; rdev_for_each(rdev, mddev) { if (olddisks < n && rdev->data_offset < rdev->new_data_offset) goto out_unlock; if (olddisks > n && rdev->data_offset > rdev->new_data_offset) goto out_unlock; } err = 0; mddev->delta_disks = n - olddisks; mddev->raid_disks = n; mddev->reshape_backwards = (mddev->delta_disks < 0); } else mddev->raid_disks = n; out_unlock: mddev_unlock(mddev); return err ? 
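/*
 * Worked example for the pending-reshape branch above (numbers are
 * illustrative): an array prepared for a 4 -> 6 disk reshape has
 * raid_disks == 6 and delta_disks == 2, so this file reads back as
 * "6 (4)".  Writing a new value n recomputes delta_disks = n - 4
 * against the original disk count, and reshape_backwards follows the
 * sign of delta_disks.
 */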
err : len; } static struct md_sysfs_entry md_raid_disks = __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); static ssize_t uuid_show(struct mddev *mddev, char *page) { return sprintf(page, "%pU\n", mddev->uuid); } static struct md_sysfs_entry md_uuid = __ATTR(uuid, S_IRUGO, uuid_show, NULL); static ssize_t chunk_size_show(struct mddev *mddev, char *page) { if (mddev->reshape_position != MaxSector && mddev->chunk_sectors != mddev->new_chunk_sectors) return sprintf(page, "%d (%d)\n", mddev->new_chunk_sectors << 9, mddev->chunk_sectors << 9); return sprintf(page, "%d\n", mddev->chunk_sectors << 9); } static ssize_t chunk_size_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long n; int err; err = kstrtoul(buf, 10, &n); if (err < 0) return err; err = mddev_lock(mddev); if (err) return err; if (mddev->pers) { if (mddev->pers->check_reshape == NULL) err = -EBUSY; else if (!md_is_rdwr(mddev)) err = -EROFS; else { mddev->new_chunk_sectors = n >> 9; err = mddev->pers->check_reshape(mddev); if (err) mddev->new_chunk_sectors = mddev->chunk_sectors; } } else { mddev->new_chunk_sectors = n >> 9; if (mddev->reshape_position == MaxSector) mddev->chunk_sectors = n >> 9; } mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_chunk_size = __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); static ssize_t resync_start_show(struct mddev *mddev, char *page) { if (mddev->recovery_cp == MaxSector) return sprintf(page, "none\n"); return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); } static ssize_t resync_start_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long long n; int err; if (cmd_match(buf, "none")) n = MaxSector; else { err = kstrtoull(buf, 10, &n); if (err < 0) return err; if (n != (sector_t)n) return -EINVAL; } err = mddev_lock(mddev); if (err) return err; if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) err = -EBUSY; if (!err) { mddev->recovery_cp = n; if (mddev->pers) set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_resync_start = __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); /* * The array state can be: * * clear * No devices, no size, no level * Equivalent to STOP_ARRAY ioctl * inactive * May have some settings, but array is not active * all IO results in error * When written, doesn't tear down array, but just stops it * suspended (not supported yet) * All IO requests will block. The array can be reconfigured. * Writing this, if accepted, will block until array is quiescent * readonly * no resync can happen. no superblocks get written. * write requests fail * read-auto * like readonly, but behaves like 'clean' on a write request. * * clean - no pending writes, but otherwise active. * When written to inactive array, starts without resync * If a write request arrives then * if metadata is known, mark 'dirty' and switch to 'active'. * if not known, block and switch to write-pending * If written to an active array that has pending writes, then fails. * active * fully active: IO and resync can be happening. * When written to inactive array, starts with resync * * write-pending * clean, but writes are blocked waiting for 'active' to be written. * * active-idle * like active, but no writes have been seen for a while (100msec). * * broken * Array is failed. 
It's useful because mounted-arrays aren't stopped * when array is failed, so this state will at least alert the user that * something is wrong. */ enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, write_pending, active_idle, broken, bad_word}; static char *array_states[] = { "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", "write-pending", "active-idle", "broken", NULL }; static int match_word(const char *word, char **list) { int n; for (n=0; list[n]; n++) if (cmd_match(word, list[n])) break; return n; } static ssize_t array_state_show(struct mddev *mddev, char *page) { enum array_state st = inactive; if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { switch(mddev->ro) { case MD_RDONLY: st = readonly; break; case MD_AUTO_READ: st = read_auto; break; case MD_RDWR: spin_lock(&mddev->lock); if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) st = write_pending; else if (mddev->in_sync) st = clean; else if (mddev->safemode) st = active_idle; else st = active; spin_unlock(&mddev->lock); } if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) st = broken; } else { if (list_empty(&mddev->disks) && mddev->raid_disks == 0 && mddev->dev_sectors == 0) st = clear; else st = inactive; } return sprintf(page, "%s\n", array_states[st]); } static int do_md_stop(struct mddev *mddev, int ro); static int md_set_readonly(struct mddev *mddev); static int restart_array(struct mddev *mddev); static ssize_t array_state_store(struct mddev *mddev, const char *buf, size_t len) { int err = 0; enum array_state st = match_word(buf, array_states); /* No lock dependent actions */ switch (st) { case suspended: /* not supported yet */ case write_pending: /* cannot be set */ case active_idle: /* cannot be set */ case broken: /* cannot be set */ case bad_word: return -EINVAL; case clear: case readonly: case inactive: case read_auto: if (!mddev->pers || !md_is_rdwr(mddev)) break; /* write sysfs will not open mddev and opener should be 0 */ err = mddev_set_closing_and_sync_blockdev(mddev, 0); if (err) return err; break; default: break; } if (mddev->pers && (st == active || st == clean) && mddev->ro != MD_RDONLY) { /* don't take reconfig_mutex when toggling between * clean and active */ spin_lock(&mddev->lock); if (st == active) { restart_array(mddev); clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); } else /* st == clean */ { restart_array(mddev); if (!set_in_sync(mddev)) err = -EBUSY; } if (!err) sysfs_notify_dirent_safe(mddev->sysfs_state); spin_unlock(&mddev->lock); return err ?: len; } err = mddev_lock(mddev); if (err) return err; switch (st) { case inactive: /* stop an active array, return 0 otherwise */ if (mddev->pers) err = do_md_stop(mddev, 2); break; case clear: err = do_md_stop(mddev, 0); break; case readonly: if (mddev->pers) err = md_set_readonly(mddev); else { mddev->ro = MD_RDONLY; set_disk_ro(mddev->gendisk, 1); err = do_md_run(mddev); } break; case read_auto: if (mddev->pers) { if (md_is_rdwr(mddev)) err = md_set_readonly(mddev); else if (mddev->ro == MD_RDONLY) err = restart_array(mddev); if (err == 0) { mddev->ro = MD_AUTO_READ; set_disk_ro(mddev->gendisk, 0); } } else { mddev->ro = MD_AUTO_READ; err = do_md_run(mddev); } break; case clean: if (mddev->pers) { err = restart_array(mddev); if (err) break; spin_lock(&mddev->lock); if (!set_in_sync(mddev)) err = -EBUSY; spin_unlock(&mddev->lock); } else err = -EINVAL; break; case active: if (mddev->pers) { err = 
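/*
 * Summary of the writable states handled under mddev_lock() in this
 * switch (the lock-free clean<->active toggle is the fast path further
 * up); simplified:
 *
 *	"inactive" / "clear"		stop the array via do_md_stop()
 *	"readonly" / "read-auto"	md_set_readonly() or restart_array(),
 *					or do_md_run() if the array is not started
 *	"clean" / "active"		restart_array() and adjust in_sync / sb_flags
 */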
restart_array(mddev); if (err) break; clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); wake_up(&mddev->sb_wait); err = 0; } else { mddev->ro = MD_RDWR; set_disk_ro(mddev->gendisk, 0); err = do_md_run(mddev); } break; default: err = -EINVAL; break; } if (!err) { if (mddev->hold_active == UNTIL_IOCTL) mddev->hold_active = 0; sysfs_notify_dirent_safe(mddev->sysfs_state); } mddev_unlock(mddev); if (st == readonly || st == read_auto || st == inactive || (err && st == clear)) clear_bit(MD_CLOSING, &mddev->flags); return err ?: len; } static struct md_sysfs_entry md_array_state = __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); static ssize_t max_corrected_read_errors_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", atomic_read(&mddev->max_corr_read_errors)); } static ssize_t max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int n; int rv; rv = kstrtouint(buf, 10, &n); if (rv < 0) return rv; if (n > INT_MAX) return -EINVAL; atomic_set(&mddev->max_corr_read_errors, n); return len; } static struct md_sysfs_entry max_corr_read_errors = __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, max_corrected_read_errors_store); static ssize_t null_show(struct mddev *mddev, char *page) { return -EINVAL; } static ssize_t new_dev_store(struct mddev *mddev, const char *buf, size_t len) { /* buf must be %d:%d\n? giving major and minor numbers */ /* The new device is added to the array. * If the array has a persistent superblock, we read the * superblock to initialise info and check validity. * Otherwise, only checking done is that in bind_rdev_to_array, * which mainly checks size. */ char *e; int major = simple_strtoul(buf, &e, 10); int minor; dev_t dev; struct md_rdev *rdev; int err; if (!*buf || *e != ':' || !e[1] || e[1] == '\n') return -EINVAL; minor = simple_strtoul(e+1, &e, 10); if (*e && *e != '\n') return -EINVAL; dev = MKDEV(major, minor); if (major != MAJOR(dev) || minor != MINOR(dev)) return -EOVERFLOW; err = mddev_suspend_and_lock(mddev); if (err) return err; if (mddev->persistent) { rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { struct md_rdev *rdev0 = list_entry(mddev->disks.next, struct md_rdev, same_set); err = super_types[mddev->major_version] .load_super(rdev, rdev0, mddev->minor_version); if (err < 0) goto out; } } else if (mddev->external) rdev = md_import_device(dev, -2, -1); else rdev = md_import_device(dev, -1, -1); if (IS_ERR(rdev)) { mddev_unlock_and_resume(mddev); return PTR_ERR(rdev); } err = bind_rdev_to_array(rdev, mddev); out: if (err) export_rdev(rdev, mddev); mddev_unlock_and_resume(mddev); if (!err) md_new_event(); return err ? err : len; } static struct md_sysfs_entry md_new_device = __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); static ssize_t bitmap_store(struct mddev *mddev, const char *buf, size_t len) { char *end; unsigned long chunk, end_chunk; int err; err = mddev_lock(mddev); if (err) return err; if (!mddev->bitmap) goto out; /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */ while (*buf) { chunk = end_chunk = simple_strtoul(buf, &end, 0); if (buf == end) break; if (*end == '-') { /* range */ buf = end + 1; end_chunk = simple_strtoul(buf, &end, 0); if (buf == end) break; } if (*end && !isspace(*end)) break; mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk); buf = skip_spaces(end); } mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */ out: mddev_unlock(mddev); return len; } static struct md_sysfs_entry md_bitmap = __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); static ssize_t size_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)mddev->dev_sectors / 2); } static int update_size(struct mddev *mddev, sector_t num_sectors); static ssize_t size_store(struct mddev *mddev, const char *buf, size_t len) { /* If array is inactive, we can reduce the component size, but * not increase it (except from 0). * If array is active, we can try an on-line resize */ sector_t sectors; int err = strict_blocks_to_sectors(buf, &sectors); if (err < 0) return err; err = mddev_lock(mddev); if (err) return err; if (mddev->pers) { err = update_size(mddev, sectors); if (err == 0) md_update_sb(mddev, 1); } else { if (mddev->dev_sectors == 0 || mddev->dev_sectors > sectors) mddev->dev_sectors = sectors; else err = -ENOSPC; } mddev_unlock(mddev); return err ? err : len; } static struct md_sysfs_entry md_size = __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); /* Metadata version. * This is one of * 'none' for arrays with no metadata (good luck...) * 'external' for arrays with externally managed metadata, * or N.M for internally known formats */ static ssize_t metadata_show(struct mddev *mddev, char *page) { if (mddev->persistent) return sprintf(page, "%d.%d\n", mddev->major_version, mddev->minor_version); else if (mddev->external) return sprintf(page, "external:%s\n", mddev->metadata_type); else return sprintf(page, "none\n"); } static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len) { int major, minor; char *e; int err; /* Changing the details of 'external' metadata is * always permitted. Otherwise there must be * no devices attached to the array. 
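 *
 * Accepted formats, as parsed below (the external name is illustrative):
 *
 *	"none"		no persistent metadata at all
 *	"external:imsm"	externally managed metadata; the name is stored
 *			verbatim in metadata_type
 *	"1.2"		internal format, "major.minor", which must exist
 *			in super_types[]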
*/ err = mddev_lock(mddev); if (err) return err; err = -EBUSY; if (mddev->external && strncmp(buf, "external:", 9) == 0) ; else if (!list_empty(&mddev->disks)) goto out_unlock; err = 0; if (cmd_match(buf, "none")) { mddev->persistent = 0; mddev->external = 0; mddev->major_version = 0; mddev->minor_version = 90; goto out_unlock; } if (strncmp(buf, "external:", 9) == 0) { size_t namelen = len-9; if (namelen >= sizeof(mddev->metadata_type)) namelen = sizeof(mddev->metadata_type)-1; memcpy(mddev->metadata_type, buf+9, namelen); mddev->metadata_type[namelen] = 0; if (namelen && mddev->metadata_type[namelen-1] == '\n') mddev->metadata_type[--namelen] = 0; mddev->persistent = 0; mddev->external = 1; mddev->major_version = 0; mddev->minor_version = 90; goto out_unlock; } major = simple_strtoul(buf, &e, 10); err = -EINVAL; if (e==buf || *e != '.') goto out_unlock; buf = e+1; minor = simple_strtoul(buf, &e, 10); if (e==buf || (*e && *e != '\n') ) goto out_unlock; err = -ENOENT; if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) goto out_unlock; mddev->major_version = major; mddev->minor_version = minor; mddev->persistent = 1; mddev->external = 0; err = 0; out_unlock: mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_metadata = __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); enum sync_action md_sync_action(struct mddev *mddev) { unsigned long recovery = mddev->recovery; /* * frozen has the highest priority, means running sync_thread will be * stopped immediately, and no new sync_thread can start. */ if (test_bit(MD_RECOVERY_FROZEN, &recovery)) return ACTION_FROZEN; /* * read-only array can't register sync_thread, and it can only * add/remove spares. */ if (!md_is_rdwr(mddev)) return ACTION_IDLE; /* * idle means no sync_thread is running, and no new sync_thread is * requested. */ if (!test_bit(MD_RECOVERY_RUNNING, &recovery) && !test_bit(MD_RECOVERY_NEEDED, &recovery)) return ACTION_IDLE; if (test_bit(MD_RECOVERY_RESHAPE, &recovery) || mddev->reshape_position != MaxSector) return ACTION_RESHAPE; if (test_bit(MD_RECOVERY_RECOVER, &recovery)) return ACTION_RECOVER; if (test_bit(MD_RECOVERY_SYNC, &recovery)) { /* * MD_RECOVERY_CHECK must be paired with * MD_RECOVERY_REQUESTED. */ if (test_bit(MD_RECOVERY_CHECK, &recovery)) return ACTION_CHECK; if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) return ACTION_REPAIR; return ACTION_RESYNC; } /* * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no * sync_action is specified. */ return ACTION_IDLE; } enum sync_action md_sync_action_by_name(const char *page) { enum sync_action action; for (action = 0; action < NR_SYNC_ACTIONS; ++action) { if (cmd_match(page, action_name[action])) return action; } return NR_SYNC_ACTIONS; } const char *md_sync_action_name(enum sync_action action) { return action_name[action]; } static ssize_t action_show(struct mddev *mddev, char *page) { enum sync_action action = md_sync_action(mddev); return sprintf(page, "%s\n", md_sync_action_name(action)); } /** * stop_sync_thread() - wait for sync_thread to stop if it's running. * @mddev: the array. * @locked: if set, reconfig_mutex will still be held after this function * return; if not set, reconfig_mutex will be released after this * function return. 
*/ static void stop_sync_thread(struct mddev *mddev, bool locked) { int sync_seq = atomic_read(&mddev->sync_seq); if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { if (!locked) mddev_unlock(mddev); return; } mddev_unlock(mddev); set_bit(MD_RECOVERY_INTR, &mddev->recovery); /* * Thread might be blocked waiting for metadata update which will now * never happen */ md_wakeup_thread_directly(mddev->sync_thread); if (work_pending(&mddev->sync_work)) flush_work(&mddev->sync_work); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) && sync_seq != atomic_read(&mddev->sync_seq))); if (locked) mddev_lock_nointr(mddev); } void md_idle_sync_thread(struct mddev *mddev) { lockdep_assert_held(&mddev->reconfig_mutex); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); stop_sync_thread(mddev, true); } EXPORT_SYMBOL_GPL(md_idle_sync_thread); void md_frozen_sync_thread(struct mddev *mddev) { lockdep_assert_held(&mddev->reconfig_mutex); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); stop_sync_thread(mddev, true); } EXPORT_SYMBOL_GPL(md_frozen_sync_thread); void md_unfrozen_sync_thread(struct mddev *mddev) { lockdep_assert_held(&mddev->reconfig_mutex); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_action); } EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread); static int mddev_start_reshape(struct mddev *mddev) { int ret; if (mddev->pers->start_reshape == NULL) return -EINVAL; if (mddev->reshape_position == MaxSector || mddev->pers->check_reshape == NULL || mddev->pers->check_reshape(mddev)) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ret = mddev->pers->start_reshape(mddev); if (ret) return ret; } else { /* * If reshape is still in progress, and md_check_recovery() can * continue to reshape, don't restart reshape because data can * be corrupted for raid456. */ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } sysfs_notify_dirent_safe(mddev->sysfs_degraded); return 0; } static ssize_t action_store(struct mddev *mddev, const char *page, size_t len) { int ret; enum sync_action action; if (!mddev->pers || !mddev->pers->sync_request) return -EINVAL; retry: if (work_busy(&mddev->sync_work)) flush_work(&mddev->sync_work); ret = mddev_lock(mddev); if (ret) return ret; if (work_busy(&mddev->sync_work)) { mddev_unlock(mddev); goto retry; } action = md_sync_action_by_name(page); /* TODO: mdadm rely on "idle" to start sync_thread. 
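 * In short: with a sync thread already running only "frozen" and
 * "idle" are honoured (everything else returns -EBUSY), while on an
 * idle array the written word selects the recovery bits, e.g.:
 *
 *	"check"   -> MD_RECOVERY_CHECK | MD_RECOVERY_REQUESTED | MD_RECOVERY_SYNC
 *	"repair"  -> MD_RECOVERY_REQUESTED | MD_RECOVERY_SYNC
 *	"resync"  -> only MD_RECOVERY_FROZEN is cleared here; the actual
 *	             resync starts once MD_RECOVERY_NEEDED is acted upon
 *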
*/ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { switch (action) { case ACTION_FROZEN: md_frozen_sync_thread(mddev); ret = len; goto out; case ACTION_IDLE: md_idle_sync_thread(mddev); break; case ACTION_RESHAPE: case ACTION_RECOVER: case ACTION_CHECK: case ACTION_REPAIR: case ACTION_RESYNC: ret = -EBUSY; goto out; default: ret = -EINVAL; goto out; } } else { switch (action) { case ACTION_FROZEN: set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ret = len; goto out; case ACTION_RESHAPE: clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); ret = mddev_start_reshape(mddev); if (ret) goto out; break; case ACTION_RECOVER: clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); break; case ACTION_CHECK: set_bit(MD_RECOVERY_CHECK, &mddev->recovery); fallthrough; case ACTION_REPAIR: set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); set_bit(MD_RECOVERY_SYNC, &mddev->recovery); fallthrough; case ACTION_RESYNC: case ACTION_IDLE: clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); break; default: ret = -EINVAL; goto out; } } if (mddev->ro == MD_AUTO_READ) { /* A write to sync_action is enough to justify * canceling read-auto mode */ mddev->ro = MD_RDWR; md_wakeup_thread(mddev->sync_thread); } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); sysfs_notify_dirent_safe(mddev->sysfs_action); ret = len; out: mddev_unlock(mddev); return ret; } static struct md_sysfs_entry md_scan_mode = __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static ssize_t last_sync_action_show(struct mddev *mddev, char *page) { return sprintf(page, "%s\n", md_sync_action_name(mddev->last_sync_action)); } static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); static ssize_t mismatch_cnt_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long) atomic64_read(&mddev->resync_mismatches)); } static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); static ssize_t sync_min_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_min(mddev), mddev->sync_speed_min ? "local": "system"); } static ssize_t sync_min_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int min; int rv; if (strncmp(buf, "system", 6)==0) { min = 0; } else { rv = kstrtouint(buf, 10, &min); if (rv < 0) return rv; if (min == 0) return -EINVAL; } mddev->sync_speed_min = min; return len; } static struct md_sysfs_entry md_sync_min = __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); static ssize_t sync_max_show(struct mddev *mddev, char *page) { return sprintf(page, "%d (%s)\n", speed_max(mddev), mddev->sync_speed_max ? 
"local": "system"); } static ssize_t sync_max_store(struct mddev *mddev, const char *buf, size_t len) { unsigned int max; int rv; if (strncmp(buf, "system", 6)==0) { max = 0; } else { rv = kstrtouint(buf, 10, &max); if (rv < 0) return rv; if (max == 0) return -EINVAL; } mddev->sync_speed_max = max; return len; } static struct md_sysfs_entry md_sync_max = __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); static ssize_t degraded_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->degraded); } static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); static ssize_t sync_force_parallel_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->parallel_resync); } static ssize_t sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) { long n; if (kstrtol(buf, 10, &n)) return -EINVAL; if (n != 0 && n != 1) return -EINVAL; mddev->parallel_resync = n; if (mddev->sync_thread) wake_up(&resync_wait); return len; } /* force parallel resync, even with shared block devices */ static struct md_sysfs_entry md_sync_force_parallel = __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, sync_force_parallel_show, sync_force_parallel_store); static ssize_t sync_speed_show(struct mddev *mddev, char *page) { unsigned long resync, dt, db; if (mddev->curr_resync == MD_RESYNC_NONE) return sprintf(page, "none\n"); resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); dt = (jiffies - mddev->resync_mark) / HZ; if (!dt) dt++; db = resync - mddev->resync_mark_cnt; return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ } static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); static ssize_t sync_completed_show(struct mddev *mddev, char *page) { unsigned long long max_sectors, resync; if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return sprintf(page, "none\n"); if (mddev->curr_resync == MD_RESYNC_YIELDED || mddev->curr_resync == MD_RESYNC_DELAYED) return sprintf(page, "delayed\n"); if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) max_sectors = mddev->resync_max_sectors; else max_sectors = mddev->dev_sectors; resync = mddev->curr_resync_completed; return sprintf(page, "%llu / %llu\n", resync, max_sectors); } static struct md_sysfs_entry md_sync_completed = __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); static ssize_t min_sync_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_min); } static ssize_t min_sync_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long long min; int err; if (kstrtoull(buf, 10, &min)) return -EINVAL; spin_lock(&mddev->lock); err = -EINVAL; if (min > mddev->resync_max) goto out_unlock; err = -EBUSY; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) goto out_unlock; /* Round down to multiple of 4K for safety */ mddev->resync_min = round_down(min, 8); err = 0; out_unlock: spin_unlock(&mddev->lock); return err ?: len; } static struct md_sysfs_entry md_min_sync = __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); static ssize_t max_sync_show(struct mddev *mddev, char *page) { if (mddev->resync_max == MaxSector) return sprintf(page, "max\n"); else return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_max); } static ssize_t max_sync_store(struct mddev *mddev, const char *buf, size_t len) { int err; spin_lock(&mddev->lock); if (strncmp(buf, "max", 3) == 0) mddev->resync_max = MaxSector; else { unsigned long long max; int 
chunk; err = -EINVAL; if (kstrtoull(buf, 10, &max)) goto out_unlock; if (max < mddev->resync_min) goto out_unlock; err = -EBUSY; if (max < mddev->resync_max && md_is_rdwr(mddev) && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) goto out_unlock; /* Must be a multiple of chunk_size */ chunk = mddev->chunk_sectors; if (chunk) { sector_t temp = max; err = -EINVAL; if (sector_div(temp, chunk)) goto out_unlock; } mddev->resync_max = max; } wake_up(&mddev->recovery_wait); err = 0; out_unlock: spin_unlock(&mddev->lock); return err ?: len; } static struct md_sysfs_entry md_max_sync = __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); static ssize_t suspend_lo_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)READ_ONCE(mddev->suspend_lo)); } static ssize_t suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long long new; int err; err = kstrtoull(buf, 10, &new); if (err < 0) return err; if (new != (sector_t)new) return -EINVAL; err = mddev_suspend(mddev, true); if (err) return err; WRITE_ONCE(mddev->suspend_lo, new); mddev_resume(mddev); return len; } static struct md_sysfs_entry md_suspend_lo = __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); static ssize_t suspend_hi_show(struct mddev *mddev, char *page) { return sprintf(page, "%llu\n", (unsigned long long)READ_ONCE(mddev->suspend_hi)); } static ssize_t suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long long new; int err; err = kstrtoull(buf, 10, &new); if (err < 0) return err; if (new != (sector_t)new) return -EINVAL; err = mddev_suspend(mddev, true); if (err) return err; WRITE_ONCE(mddev->suspend_hi, new); mddev_resume(mddev); return len; } static struct md_sysfs_entry md_suspend_hi = __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); static ssize_t reshape_position_show(struct mddev *mddev, char *page) { if (mddev->reshape_position != MaxSector) return sprintf(page, "%llu\n", (unsigned long long)mddev->reshape_position); strcpy(page, "none\n"); return 5; } static ssize_t reshape_position_store(struct mddev *mddev, const char *buf, size_t len) { struct md_rdev *rdev; unsigned long long new; int err; err = kstrtoull(buf, 10, &new); if (err < 0) return err; if (new != (sector_t)new) return -EINVAL; err = mddev_lock(mddev); if (err) return err; err = -EBUSY; if (mddev->pers) goto unlock; mddev->reshape_position = new; mddev->delta_disks = 0; mddev->reshape_backwards = 0; mddev->new_level = mddev->level; mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; rdev_for_each(rdev, mddev) rdev->new_data_offset = rdev->data_offset; err = 0; unlock: mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_reshape_position = __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, reshape_position_store); static ssize_t reshape_direction_show(struct mddev *mddev, char *page) { return sprintf(page, "%s\n", mddev->reshape_backwards ? 
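/*
 * reshape_direction can only be flipped while nothing is staged yet:
 * the store below refuses with -EBUSY once delta_disks is non-zero and
 * with -EINVAL for 0.90 metadata, which cannot describe a backwards
 * reshape.  A plausible ordering (illustrative, not mandated here) is
 * to set reshape_direction first, then new_offset / raid_disks, and
 * finally start the reshape through sync_action.
 */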
"backwards" : "forwards"); } static ssize_t reshape_direction_store(struct mddev *mddev, const char *buf, size_t len) { int backwards = 0; int err; if (cmd_match(buf, "forwards")) backwards = 0; else if (cmd_match(buf, "backwards")) backwards = 1; else return -EINVAL; if (mddev->reshape_backwards == backwards) return len; err = mddev_lock(mddev); if (err) return err; /* check if we are allowed to change */ if (mddev->delta_disks) err = -EBUSY; else if (mddev->persistent && mddev->major_version == 0) err = -EINVAL; else mddev->reshape_backwards = backwards; mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_reshape_direction = __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show, reshape_direction_store); static ssize_t array_size_show(struct mddev *mddev, char *page) { if (mddev->external_size) return sprintf(page, "%llu\n", (unsigned long long)mddev->array_sectors/2); else return sprintf(page, "default\n"); } static ssize_t array_size_store(struct mddev *mddev, const char *buf, size_t len) { sector_t sectors; int err; err = mddev_lock(mddev); if (err) return err; /* cluster raid doesn't support change array_sectors */ if (mddev_is_clustered(mddev)) { mddev_unlock(mddev); return -EINVAL; } if (strncmp(buf, "default", 7) == 0) { if (mddev->pers) sectors = mddev->pers->size(mddev, 0, 0); else sectors = mddev->array_sectors; mddev->external_size = 0; } else { if (strict_blocks_to_sectors(buf, &sectors) < 0) err = -EINVAL; else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) err = -E2BIG; else mddev->external_size = 1; } if (!err) { mddev->array_sectors = sectors; if (mddev->pers) set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); } mddev_unlock(mddev); return err ?: len; } static struct md_sysfs_entry md_array_size = __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, array_size_store); static ssize_t consistency_policy_show(struct mddev *mddev, char *page) { int ret; if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { ret = sprintf(page, "journal\n"); } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { ret = sprintf(page, "ppl\n"); } else if (mddev->bitmap) { ret = sprintf(page, "bitmap\n"); } else if (mddev->pers) { if (mddev->pers->sync_request) ret = sprintf(page, "resync\n"); else ret = sprintf(page, "none\n"); } else { ret = sprintf(page, "unknown\n"); } return ret; } static ssize_t consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) { int err = 0; if (mddev->pers) { if (mddev->pers->change_consistency_policy) err = mddev->pers->change_consistency_policy(mddev, buf); else err = -EBUSY; } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { set_bit(MD_HAS_PPL, &mddev->flags); } else { err = -EINVAL; } return err ? err : len; } static struct md_sysfs_entry md_consistency_policy = __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, consistency_policy_store); static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) { return sprintf(page, "%d\n", mddev->fail_last_dev); } /* * Setting fail_last_dev to true to allow last device to be forcibly removed * from RAID1/RAID10. 
*/ static ssize_t fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) { int ret; bool value; ret = kstrtobool(buf, &value); if (ret) return ret; if (value != mddev->fail_last_dev) mddev->fail_last_dev = value; return len; } static struct md_sysfs_entry md_fail_last_dev = __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, fail_last_dev_store); static ssize_t serialize_policy_show(struct mddev *mddev, char *page) { if (mddev->pers == NULL || (mddev->pers->level != 1)) return sprintf(page, "n/a\n"); else return sprintf(page, "%d\n", mddev->serialize_policy); } /* * Setting serialize_policy to true to enforce write IO is not reordered * for raid1. */ static ssize_t serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) { int err; bool value; err = kstrtobool(buf, &value); if (err) return err; if (value == mddev->serialize_policy) return len; err = mddev_suspend_and_lock(mddev); if (err) return err; if (mddev->pers == NULL || (mddev->pers->level != 1)) { pr_err("md: serialize_policy is only effective for raid1\n"); err = -EINVAL; goto unlock; } if (value) mddev_create_serial_pool(mddev, NULL); else mddev_destroy_serial_pool(mddev, NULL); mddev->serialize_policy = value; unlock: mddev_unlock_and_resume(mddev); return err ?: len; } static struct md_sysfs_entry md_serialize_policy = __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, serialize_policy_store); static struct attribute *md_default_attrs[] = { &md_level.attr, &md_new_level.attr, &md_layout.attr, &md_raid_disks.attr, &md_uuid.attr, &md_chunk_size.attr, &md_size.attr, &md_resync_start.attr, &md_metadata.attr, &md_new_device.attr, &md_safe_delay.attr, &md_array_state.attr, &md_reshape_position.attr, &md_reshape_direction.attr, &md_array_size.attr, &max_corr_read_errors.attr, &md_consistency_policy.attr, &md_fail_last_dev.attr, &md_serialize_policy.attr, NULL, }; static const struct attribute_group md_default_group = { .attrs = md_default_attrs, }; static struct attribute *md_redundancy_attrs[] = { &md_scan_mode.attr, &md_last_scan_mode.attr, &md_mismatches.attr, &md_sync_min.attr, &md_sync_max.attr, &md_sync_speed.attr, &md_sync_force_parallel.attr, &md_sync_completed.attr, &md_min_sync.attr, &md_max_sync.attr, &md_suspend_lo.attr, &md_suspend_hi.attr, &md_bitmap.attr, &md_degraded.attr, NULL, }; static const struct attribute_group md_redundancy_group = { .name = NULL, .attrs = md_redundancy_attrs, }; static const struct attribute_group *md_attr_groups[] = { &md_default_group, &md_bitmap_group, NULL, }; static ssize_t md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); struct mddev *mddev = container_of(kobj, struct mddev, kobj); ssize_t rv; if (!entry->show) return -EIO; spin_lock(&all_mddevs_lock); if (!mddev_get(mddev)) { spin_unlock(&all_mddevs_lock); return -EBUSY; } spin_unlock(&all_mddevs_lock); rv = entry->show(mddev, page); mddev_put(mddev); return rv; } static ssize_t md_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); struct mddev *mddev = container_of(kobj, struct mddev, kobj); ssize_t rv; if (!entry->store) return -EIO; if (!capable(CAP_SYS_ADMIN)) return -EACCES; spin_lock(&all_mddevs_lock); if (!mddev_get(mddev)) { spin_unlock(&all_mddevs_lock); return -EBUSY; } spin_unlock(&all_mddevs_lock); rv = entry->store(mddev, page, length); 
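[Annotation, not part of md.c] md_attr_show() and md_attr_store() above route reads and writes of the files under /sys/block/mdX/md/ to the matching per-attribute show/store handlers. The following user-space sketch is an illustration only: it assumes an array named md0 exists, that its personality exposes the redundancy attributes, and that the caller may write them; the helper names md_sysfs_write()/md_sysfs_read() are invented here. It caps the resync rate through sync_speed_max (served by sync_max_store()) and then reads back sync_speed.

#include <stdio.h>

/* Hypothetical helpers; any md array name could be substituted for "md0". */
static int md_sysfs_write(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);		/* delivered to the matching *_store() handler */
	return fclose(f);
}

static int md_sysfs_read(const char *attr, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f))
		buf[0] = '\0';
	return fclose(f);
}

int main(void)
{
	char line[64];

	md_sysfs_write("sync_speed_max", "50000");	/* KiB/s cap */
	if (md_sysfs_read("sync_speed", line, sizeof(line)) == 0)
		printf("current resync speed: %s", line);	/* "none" when idle */
	return 0;
}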
mddev_put(mddev); return rv; } static void md_kobj_release(struct kobject *ko) { struct mddev *mddev = container_of(ko, struct mddev, kobj); if (mddev->sysfs_state) sysfs_put(mddev->sysfs_state); if (mddev->sysfs_level) sysfs_put(mddev->sysfs_level); del_gendisk(mddev->gendisk); put_disk(mddev->gendisk); } static const struct sysfs_ops md_sysfs_ops = { .show = md_attr_show, .store = md_attr_store, }; static const struct kobj_type md_ktype = { .release = md_kobj_release, .sysfs_ops = &md_sysfs_ops, .default_groups = md_attr_groups, }; int mdp_major = 0; /* stack the limit for all rdevs into lim */ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, unsigned int flags) { struct md_rdev *rdev; rdev_for_each(rdev, mddev) { queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); if ((flags & MDDEV_STACK_INTEGRITY) && !queue_limits_stack_integrity_bdev(lim, rdev->bdev)) return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); /* apply the extra stacking limits from a new rdev into mddev */ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev) { struct queue_limits lim; if (mddev_is_dm(mddev)) return 0; lim = queue_limits_start_update(mddev->gendisk->queue); queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, mddev->gendisk->disk_name); if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) { pr_err("%s: incompatible integrity profile for %pg\n", mdname(mddev), rdev->bdev); queue_limits_cancel_update(mddev->gendisk->queue); return -ENXIO; } return queue_limits_commit_update(mddev->gendisk->queue, &lim); } EXPORT_SYMBOL_GPL(mddev_stack_new_rdev); /* update the optimal I/O size after a reshape */ void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes) { struct queue_limits lim; if (mddev_is_dm(mddev)) return; /* don't bother updating io_opt if we can't suspend the array */ if (mddev_suspend(mddev, false) < 0) return; lim = queue_limits_start_update(mddev->gendisk->queue); lim.io_opt = lim.io_min * nr_stripes; queue_limits_commit_update(mddev->gendisk->queue, &lim); mddev_resume(mddev); } EXPORT_SYMBOL_GPL(mddev_update_io_opt); static void mddev_delayed_delete(struct work_struct *ws) { struct mddev *mddev = container_of(ws, struct mddev, del_work); kobject_put(&mddev->kobj); } void md_init_stacking_limits(struct queue_limits *lim) { blk_set_stacking_limits(lim); lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; } EXPORT_SYMBOL_GPL(md_init_stacking_limits); struct mddev *md_alloc(dev_t dev, char *name) { /* * If dev is zero, name is the name of a device to allocate with * an arbitrary minor number. It will be "md_???" * If dev is non-zero it must be a device number with a MAJOR of * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then * the device is being created by opening a node in /dev. * If "name" is not NULL, the device is being created by * writing to /sys/module/md_mod/parameters/new_array. */ static DEFINE_MUTEX(disks_mutex); struct mddev *mddev; struct gendisk *disk; int partitioned; int shift; int unit; int error; /* * Wait for any previous instance of this device to be completely * removed (mddev_delayed_delete). */ flush_workqueue(md_misc_wq); mutex_lock(&disks_mutex); mddev = mddev_alloc(dev); if (IS_ERR(mddev)) { error = PTR_ERR(mddev); goto out_unlock; } partitioned = (MAJOR(mddev->unit) != MD_MAJOR); shift = partitioned ? 
MdpMinorShift : 0; unit = MINOR(mddev->unit) >> shift; if (name && !dev) { /* Need to ensure that 'name' is not a duplicate. */ struct mddev *mddev2; spin_lock(&all_mddevs_lock); list_for_each_entry(mddev2, &all_mddevs, all_mddevs) if (mddev2->gendisk && strcmp(mddev2->gendisk->disk_name, name) == 0) { spin_unlock(&all_mddevs_lock); error = -EEXIST; goto out_free_mddev; } spin_unlock(&all_mddevs_lock); } if (name && dev) /* * Creating /dev/mdNNN via "newarray", so adjust hold_active. */ mddev->hold_active = UNTIL_STOP; disk = blk_alloc_disk(NULL, NUMA_NO_NODE); if (IS_ERR(disk)) { error = PTR_ERR(disk); goto out_free_mddev; } disk->major = MAJOR(mddev->unit); disk->first_minor = unit << shift; disk->minors = 1 << shift; if (name) strcpy(disk->disk_name, name); else if (partitioned) sprintf(disk->disk_name, "md_d%d", unit); else sprintf(disk->disk_name, "md%d", unit); disk->fops = &md_fops; disk->private_data = mddev; disk->events |= DISK_EVENT_MEDIA_CHANGE; mddev->gendisk = disk; error = add_disk(disk); if (error) goto out_put_disk; kobject_init(&mddev->kobj, &md_ktype); error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); if (error) { /* * The disk is already live at this point. Clear the hold flag * and let mddev_put take care of the deletion, as it isn't any * different from a normal close on last release now. */ mddev->hold_active = 0; mutex_unlock(&disks_mutex); mddev_put(mddev); return ERR_PTR(error); } kobject_uevent(&mddev->kobj, KOBJ_ADD); mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); mutex_unlock(&disks_mutex); return mddev; out_put_disk: put_disk(disk); out_free_mddev: mddev_free(mddev); out_unlock: mutex_unlock(&disks_mutex); return ERR_PTR(error); } static int md_alloc_and_put(dev_t dev, char *name) { struct mddev *mddev = md_alloc(dev, name); if (IS_ERR(mddev)) return PTR_ERR(mddev); mddev_put(mddev); return 0; } static void md_probe(dev_t dev) { if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512) return; if (create_on_open) md_alloc_and_put(dev, NULL); } static int add_named_array(const char *val, const struct kernel_param *kp) { /* * val must be "md_*" or "mdNNN". * For "md_*" we allocate an array with a large free minor number, and * set the name to val. val must not already be an active name. * For "mdNNN" we allocate an array with the minor number NNN * which must not already be in use. */ int len = strlen(val); char buf[DISK_NAME_LEN]; unsigned long devnum; while (len && val[len-1] == '\n') len--; if (len >= DISK_NAME_LEN) return -E2BIG; strscpy(buf, val, len+1); if (strncmp(buf, "md_", 3) == 0) return md_alloc_and_put(0, buf); if (strncmp(buf, "md", 2) == 0 && isdigit(buf[2]) && kstrtoul(buf+2, 10, &devnum) == 0 && devnum <= MINORMASK) return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL); return -EINVAL; } static void md_safemode_timeout(struct timer_list *t) { struct mddev *mddev = from_timer(mddev, t, safemode_timer); mddev->safemode = 1; if (mddev->external) sysfs_notify_dirent_safe(mddev->sysfs_state); md_wakeup_thread(mddev->thread); } static int start_dirty_degraded; int md_run(struct mddev *mddev) { int err; struct md_rdev *rdev; struct md_personality *pers; bool nowait = true; if (list_empty(&mddev->disks)) /* cannot run an array with no devices.. 
*/ return -EINVAL; if (mddev->pers) return -EBUSY; /* Cannot run until previous stop completes properly */ if (mddev->sysfs_active) return -EBUSY; /* * Analyze all RAID superblock(s) */ if (!mddev->raid_disks) { if (!mddev->persistent) return -EINVAL; err = analyze_sbs(mddev); if (err) return -EINVAL; } if (mddev->level != LEVEL_NONE) request_module("md-level-%d", mddev->level); else if (mddev->clevel[0]) request_module("md-%s", mddev->clevel); /* * Drop all container device buffers, from now on * the only valid external interface is through the md * device. */ mddev->has_superblocks = false; rdev_for_each(rdev, mddev) { if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); invalidate_bdev(rdev->bdev); if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { mddev->ro = MD_RDONLY; if (!mddev_is_dm(mddev)) set_disk_ro(mddev->gendisk, 1); } if (rdev->sb_page) mddev->has_superblocks = true; /* perform some consistency tests on the device. * We don't want the data to overlap the metadata, * Internal Bitmap issues have been handled elsewhere. */ if (rdev->meta_bdev) { /* Nothing to check */; } else if (rdev->data_offset < rdev->sb_start) { if (mddev->dev_sectors && rdev->data_offset + mddev->dev_sectors > rdev->sb_start) { pr_warn("md: %s: data overlaps metadata\n", mdname(mddev)); return -EINVAL; } } else { if (rdev->sb_start + rdev->sb_size/512 > rdev->data_offset) { pr_warn("md: %s: metadata overlaps data\n", mdname(mddev)); return -EINVAL; } } sysfs_notify_dirent_safe(rdev->sysfs_state); nowait = nowait && bdev_nowait(rdev->bdev); } if (!bioset_initialized(&mddev->bio_set)) { err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); if (err) return err; } if (!bioset_initialized(&mddev->sync_set)) { err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); if (err) goto exit_bio_set; } if (!bioset_initialized(&mddev->io_clone_set)) { err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE, offsetof(struct md_io_clone, bio_clone), 0); if (err) goto exit_sync_set; } spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); if (!pers || !try_module_get(pers->owner)) { spin_unlock(&pers_lock); if (mddev->level != LEVEL_NONE) pr_warn("md: personality for level %d is not loaded!\n", mddev->level); else pr_warn("md: personality for level %s is not loaded!\n", mddev->clevel); err = -EINVAL; goto abort; } spin_unlock(&pers_lock); if (mddev->level != pers->level) { mddev->level = pers->level; mddev->new_level = pers->level; } strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); if (mddev->reshape_position != MaxSector && pers->start_reshape == NULL) { /* This personality cannot handle reshaping... */ module_put(pers->owner); err = -EINVAL; goto abort; } if (pers->sync_request) { /* Warn if this is a potentially silly * configuration. 
*/ struct md_rdev *rdev2; int warned = 0; rdev_for_each(rdev, mddev) rdev_for_each(rdev2, mddev) { if (rdev < rdev2 && rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n", mdname(mddev), rdev->bdev, rdev2->bdev); warned = 1; } } if (warned) pr_warn("True protection against single-disk failure might be compromised.\n"); } /* dm-raid expect sync_thread to be frozen until resume */ if (mddev->gendisk) mddev->recovery = 0; /* may be over-ridden by personality */ mddev->resync_max_sectors = mddev->dev_sectors; mddev->ok_start_degraded = start_dirty_degraded; if (start_readonly && md_is_rdwr(mddev)) mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */ err = pers->run(mddev); if (err) pr_warn("md: pers->run() failed ...\n"); else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { WARN_ONCE(!mddev->external_size, "%s: default size too small, but 'external_size' not in effect?\n", __func__); pr_warn("md: invalid array_size %llu > default size %llu\n", (unsigned long long)mddev->array_sectors / 2, (unsigned long long)pers->size(mddev, 0, 0) / 2); err = -EINVAL; } if (err == 0 && pers->sync_request && (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { err = mddev->bitmap_ops->create(mddev, -1); if (err) pr_warn("%s: failed to create bitmap (%d)\n", mdname(mddev), err); } if (err) goto bitmap_abort; if (mddev->bitmap_info.max_write_behind > 0) { bool create_pool = false; rdev_for_each(rdev, mddev) { if (test_bit(WriteMostly, &rdev->flags) && rdev_init_serial(rdev)) create_pool = true; } if (create_pool && mddev->serial_info_pool == NULL) { mddev->serial_info_pool = mempool_create_kmalloc_pool(NR_SERIAL_INFOS, sizeof(struct serial_info)); if (!mddev->serial_info_pool) { err = -ENOMEM; goto bitmap_abort; } } } if (pers->sync_request) { if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_redundancy_group)) pr_warn("md: cannot register extra attributes for %s\n", mdname(mddev)); mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); } else if (mddev->ro == MD_AUTO_READ) mddev->ro = MD_RDWR; atomic_set(&mddev->max_corr_read_errors, MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); mddev->safemode = 0; if (mddev_is_clustered(mddev)) mddev->safemode_delay = 0; else mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; mddev->in_sync = 1; smp_wmb(); spin_lock(&mddev->lock); mddev->pers = pers; spin_unlock(&mddev->lock); rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0) sysfs_link_rdev(mddev, rdev); /* failure here is OK */ if (mddev->degraded && md_is_rdwr(mddev)) /* This ensures that recovering status is reported immediately * via sysfs - until a lack of spares is confirmed. 
*/ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); if (mddev->sb_flags) md_update_sb(mddev, 0); md_new_event(); return 0; bitmap_abort: mddev_detach(mddev); if (mddev->private) pers->free(mddev, mddev->private); mddev->private = NULL; module_put(pers->owner); mddev->bitmap_ops->destroy(mddev); abort: bioset_exit(&mddev->io_clone_set); exit_sync_set: bioset_exit(&mddev->sync_set); exit_bio_set: bioset_exit(&mddev->bio_set); return err; } EXPORT_SYMBOL_GPL(md_run); int do_md_run(struct mddev *mddev) { int err; set_bit(MD_NOT_READY, &mddev->flags); err = md_run(mddev); if (err) goto out; err = mddev->bitmap_ops->load(mddev); if (err) { mddev->bitmap_ops->destroy(mddev); goto out; } if (mddev_is_clustered(mddev)) md_allow_write(mddev); /* run start up tasks that require md_thread */ md_start(mddev); md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); clear_bit(MD_NOT_READY, &mddev->flags); mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); sysfs_notify_dirent_safe(mddev->sysfs_state); sysfs_notify_dirent_safe(mddev->sysfs_action); sysfs_notify_dirent_safe(mddev->sysfs_degraded); out: clear_bit(MD_NOT_READY, &mddev->flags); return err; } int md_start(struct mddev *mddev) { int ret = 0; if (mddev->pers->start) { set_bit(MD_RECOVERY_WAIT, &mddev->recovery); ret = mddev->pers->start(mddev); clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); md_wakeup_thread(mddev->sync_thread); } return ret; } EXPORT_SYMBOL_GPL(md_start); static int restart_array(struct mddev *mddev) { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; bool has_journal = false; bool has_readonly = false; /* Complain if it has no devices */ if (list_empty(&mddev->disks)) return -ENXIO; if (!mddev->pers) return -EINVAL; if (md_is_rdwr(mddev)) return -EBUSY; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { if (test_bit(Journal, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) has_journal = true; if (rdev_read_only(rdev)) has_readonly = true; } rcu_read_unlock(); if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal) /* Don't restart rw with journal missing/faulty */ return -EINVAL; if (has_readonly) return -EROFS; mddev->safemode = 0; mddev->ro = MD_RDWR; set_disk_ro(disk, 0); pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); /* Kick recovery or resync if necessary */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->sync_thread); sysfs_notify_dirent_safe(mddev->sysfs_state); return 0; } static void md_clean(struct mddev *mddev) { mddev->array_sectors = 0; mddev->external_size = 0; mddev->dev_sectors = 0; mddev->raid_disks = 0; mddev->recovery_cp = 0; mddev->resync_min = 0; mddev->resync_max = MaxSector; mddev->reshape_position = MaxSector; /* we still need mddev->external in export_rdev, do not clear it yet */ mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; /* * Don't clear MD_CLOSING, or mddev can be opened again. * 'hold_active != 0' means mddev is still in the creation * process and will be used later. 
*/ if (mddev->hold_active) mddev->flags = 0; else mddev->flags &= BIT_ULL_MASK(MD_CLOSING); mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; mddev->chunk_sectors = 0; mddev->ctime = mddev->utime = 0; mddev->layout = 0; mddev->max_disks = 0; mddev->events = 0; mddev->can_decrease_events = 0; mddev->delta_disks = 0; mddev->reshape_backwards = 0; mddev->new_level = LEVEL_NONE; mddev->new_layout = 0; mddev->new_chunk_sectors = 0; mddev->curr_resync = MD_RESYNC_NONE; atomic64_set(&mddev->resync_mismatches, 0); mddev->suspend_lo = mddev->suspend_hi = 0; mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; mddev->in_sync = 0; mddev->changed = 0; mddev->degraded = 0; mddev->safemode = 0; mddev->private = NULL; mddev->cluster_info = NULL; mddev->bitmap_info.offset = 0; mddev->bitmap_info.default_offset = 0; mddev->bitmap_info.default_space = 0; mddev->bitmap_info.chunksize = 0; mddev->bitmap_info.daemon_sleep = 0; mddev->bitmap_info.max_write_behind = 0; mddev->bitmap_info.nodes = 0; } static void __md_stop_writes(struct mddev *mddev) { del_timer_sync(&mddev->safemode_timer); if (mddev->pers && mddev->pers->quiesce) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } mddev->bitmap_ops->flush(mddev); if (md_is_rdwr(mddev) && ((!mddev->in_sync && !mddev_is_clustered(mddev)) || mddev->sb_flags)) { /* mark array as shutdown cleanly */ if (!mddev_is_clustered(mddev)) mddev->in_sync = 1; md_update_sb(mddev, 1); } /* disable policy to guarantee rdevs free resources for serialization */ mddev->serialize_policy = 0; mddev_destroy_serial_pool(mddev, NULL); } void md_stop_writes(struct mddev *mddev) { mddev_lock_nointr(mddev); set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); stop_sync_thread(mddev, true); __md_stop_writes(mddev); mddev_unlock(mddev); } EXPORT_SYMBOL_GPL(md_stop_writes); static void mddev_detach(struct mddev *mddev) { mddev->bitmap_ops->wait_behind_writes(mddev); if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) { mddev->pers->quiesce(mddev, 1); mddev->pers->quiesce(mddev, 0); } md_unregister_thread(mddev, &mddev->thread); /* the unplug fn references 'conf' */ if (!mddev_is_dm(mddev)) blk_sync_queue(mddev->gendisk->queue); } static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; mddev->bitmap_ops->destroy(mddev); mddev_detach(mddev); spin_lock(&mddev->lock); mddev->pers = NULL; spin_unlock(&mddev->lock); if (mddev->private) pers->free(mddev, mddev->private); mddev->private = NULL; if (pers->sync_request && mddev->to_remove == NULL) mddev->to_remove = &md_redundancy_group; module_put(pers->owner); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); bioset_exit(&mddev->io_clone_set); } void md_stop(struct mddev *mddev) { lockdep_assert_held(&mddev->reconfig_mutex); /* stop the array and free an attached data structures. 
* This is called from dm-raid */ __md_stop_writes(mddev); __md_stop(mddev); } EXPORT_SYMBOL_GPL(md_stop); /* ensure 'mddev->pers' exist before calling md_set_readonly() */ static int md_set_readonly(struct mddev *mddev) { int err = 0; int did_freeze = 0; if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EBUSY; if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { did_freeze = 1; set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } stop_sync_thread(mddev, false); wait_event(mddev->sb_wait, !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { pr_warn("md: %s still in use.\n",mdname(mddev)); err = -EBUSY; goto out; } __md_stop_writes(mddev); if (mddev->ro == MD_RDONLY) { err = -ENXIO; goto out; } mddev->ro = MD_RDONLY; set_disk_ro(mddev->gendisk, 1); out: if (!err || did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); sysfs_notify_dirent_safe(mddev->sysfs_state); } return err; } /* mode: * 0 - completely stop and dis-assemble array * 2 - stop but do not disassemble array */ static int do_md_stop(struct mddev *mddev, int mode) { struct gendisk *disk = mddev->gendisk; struct md_rdev *rdev; int did_freeze = 0; if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { did_freeze = 1; set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } stop_sync_thread(mddev, true); if (mddev->sysfs_active || test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { pr_warn("md: %s still in use.\n",mdname(mddev)); if (did_freeze) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); } return -EBUSY; } if (mddev->pers) { if (!md_is_rdwr(mddev)) set_disk_ro(disk, 0); __md_stop_writes(mddev); __md_stop(mddev); /* tell userspace to handle 'inactive' */ sysfs_notify_dirent_safe(mddev->sysfs_state); rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0) sysfs_unlink_rdev(mddev, rdev); set_capacity_and_notify(disk, 0); mddev->changed = 1; if (!md_is_rdwr(mddev)) mddev->ro = MD_RDWR; } /* * Free resources if final stop */ if (mode == 0) { pr_info("md: %s stopped.\n", mdname(mddev)); if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; spin_lock(&mddev->lock); mddev->bitmap_info.file = NULL; spin_unlock(&mddev->lock); fput(f); } mddev->bitmap_info.offset = 0; export_array(mddev); md_clean(mddev); if (mddev->hold_active == UNTIL_STOP) mddev->hold_active = 0; } md_new_event(); sysfs_notify_dirent_safe(mddev->sysfs_state); return 0; } #ifndef MODULE static void autorun_array(struct mddev *mddev) { struct md_rdev *rdev; int err; if (list_empty(&mddev->disks)) return; pr_info("md: running: "); rdev_for_each(rdev, mddev) { pr_cont("<%pg>", rdev->bdev); } pr_cont("\n"); err = do_md_run(mddev); if (err) { pr_warn("md: do_md_run() returned %d\n", err); do_md_stop(mddev, 0); } } /* * lets try to run arrays based on all disks that have arrived * until now. (those are in pending_raid_disks) * * the method: pick the first pending disk, collect all disks with * the same UUID, remove all from the pending list and put them into * the 'same_array' list. Then order this list based on superblock * update time (freshest comes first), kick out 'old' disks and * compare superblocks. If everything's fine then run it. 
* * If "unit" is allocated, then bump its reference count */ static void autorun_devices(int part) { struct md_rdev *rdev0, *rdev, *tmp; struct mddev *mddev; pr_info("md: autorun ...\n"); while (!list_empty(&pending_raid_disks)) { int unit; dev_t dev; LIST_HEAD(candidates); rdev0 = list_entry(pending_raid_disks.next, struct md_rdev, same_set); pr_debug("md: considering %pg ...\n", rdev0->bdev); INIT_LIST_HEAD(&candidates); rdev_for_each_list(rdev, tmp, &pending_raid_disks) if (super_90_load(rdev, rdev0, 0) >= 0) { pr_debug("md: adding %pg ...\n", rdev->bdev); list_move(&rdev->same_set, &candidates); } /* * now we have a set of devices, with all of them having * mostly sane superblocks. It's time to allocate the * mddev. */ if (part) { dev = MKDEV(mdp_major, rdev0->preferred_minor << MdpMinorShift); unit = MINOR(dev) >> MdpMinorShift; } else { dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); unit = MINOR(dev); } if (rdev0->preferred_minor != unit) { pr_warn("md: unit number in %pg is bad: %d\n", rdev0->bdev, rdev0->preferred_minor); break; } mddev = md_alloc(dev, NULL); if (IS_ERR(mddev)) break; if (mddev_suspend_and_lock(mddev)) pr_warn("md: %s locked, cannot run\n", mdname(mddev)); else if (mddev->raid_disks || mddev->major_version || !list_empty(&mddev->disks)) { pr_warn("md: %s already running, cannot run %pg\n", mdname(mddev), rdev0->bdev); mddev_unlock_and_resume(mddev); } else { pr_debug("md: created %s\n", mdname(mddev)); mddev->persistent = 1; rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); if (bind_rdev_to_array(rdev, mddev)) export_rdev(rdev, mddev); } autorun_array(mddev); mddev_unlock_and_resume(mddev); } /* on success, candidates will be empty, on error * it won't... */ rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); export_rdev(rdev, mddev); } mddev_put(mddev); } pr_info("md: ... 
autorun DONE.\n"); } #endif /* !MODULE */ static int get_version(void __user *arg) { mdu_version_t ver; ver.major = MD_MAJOR_VERSION; ver.minor = MD_MINOR_VERSION; ver.patchlevel = MD_PATCHLEVEL_VERSION; if (copy_to_user(arg, &ver, sizeof(ver))) return -EFAULT; return 0; } static int get_array_info(struct mddev *mddev, void __user *arg) { mdu_array_info_t info; int nr,working,insync,failed,spare; struct md_rdev *rdev; nr = working = insync = failed = spare = 0; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { nr++; if (test_bit(Faulty, &rdev->flags)) failed++; else { working++; if (test_bit(In_sync, &rdev->flags)) insync++; else if (test_bit(Journal, &rdev->flags)) /* TODO: add journal count to md_u.h */ ; else spare++; } } rcu_read_unlock(); info.major_version = mddev->major_version; info.minor_version = mddev->minor_version; info.patch_version = MD_PATCHLEVEL_VERSION; info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); info.level = mddev->level; info.size = mddev->dev_sectors / 2; if (info.size != mddev->dev_sectors / 2) /* overflow */ info.size = -1; info.nr_disks = nr; info.raid_disks = mddev->raid_disks; info.md_minor = mddev->md_minor; info.not_persistent= !mddev->persistent; info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); info.state = 0; if (mddev->in_sync) info.state = (1<<MD_SB_CLEAN); if (mddev->bitmap && mddev->bitmap_info.offset) info.state |= (1<<MD_SB_BITMAP_PRESENT); if (mddev_is_clustered(mddev)) info.state |= (1<<MD_SB_CLUSTERED); info.active_disks = insync; info.working_disks = working; info.failed_disks = failed; info.spare_disks = spare; info.layout = mddev->layout; info.chunk_size = mddev->chunk_sectors << 9; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int get_bitmap_file(struct mddev *mddev, void __user * arg) { mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ char *ptr; int err; file = kzalloc(sizeof(*file), GFP_NOIO); if (!file) return -ENOMEM; err = 0; spin_lock(&mddev->lock); /* bitmap enabled */ if (mddev->bitmap_info.file) { ptr = file_path(mddev->bitmap_info.file, file->pathname, sizeof(file->pathname)); if (IS_ERR(ptr)) err = PTR_ERR(ptr); else memmove(file->pathname, ptr, sizeof(file->pathname)-(ptr-file->pathname)); } spin_unlock(&mddev->lock); if (err == 0 && copy_to_user(arg, file, sizeof(*file))) err = -EFAULT; kfree(file); return err; } static int get_disk_info(struct mddev *mddev, void __user * arg) { mdu_disk_info_t info; struct md_rdev *rdev; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; rcu_read_lock(); rdev = md_find_rdev_nr_rcu(mddev, info.number); if (rdev) { info.major = MAJOR(rdev->bdev->bd_dev); info.minor = MINOR(rdev->bdev->bd_dev); info.raid_disk = rdev->raid_disk; info.state = 0; if (test_bit(Faulty, &rdev->flags)) info.state |= (1<<MD_DISK_FAULTY); else if (test_bit(In_sync, &rdev->flags)) { info.state |= (1<<MD_DISK_ACTIVE); info.state |= (1<<MD_DISK_SYNC); } if (test_bit(Journal, &rdev->flags)) info.state |= (1<<MD_DISK_JOURNAL); if (test_bit(WriteMostly, &rdev->flags)) info.state |= (1<<MD_DISK_WRITEMOSTLY); if (test_bit(FailFast, &rdev->flags)) info.state |= (1<<MD_DISK_FAILFAST); } else { info.major = info.minor = 0; info.raid_disk = -1; info.state = (1<<MD_DISK_REMOVED); } rcu_read_unlock(); if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) { struct md_rdev *rdev; dev_t dev = MKDEV(info->major,info->minor); if (mddev_is_clustered(mddev) && !(info->state & 
((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { pr_warn("%s: Cannot add to clustered mddev.\n", mdname(mddev)); return -EINVAL; } if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) return -EOVERFLOW; if (!mddev->raid_disks) { int err; /* expecting a device which has a superblock */ rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); if (IS_ERR(rdev)) { pr_warn("md: md_import_device returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } if (!list_empty(&mddev->disks)) { struct md_rdev *rdev0 = list_entry(mddev->disks.next, struct md_rdev, same_set); err = super_types[mddev->major_version] .load_super(rdev, rdev0, mddev->minor_version); if (err < 0) { pr_warn("md: %pg has different UUID to %pg\n", rdev->bdev, rdev0->bdev); export_rdev(rdev, mddev); return -EINVAL; } } err = bind_rdev_to_array(rdev, mddev); if (err) export_rdev(rdev, mddev); return err; } /* * md_add_new_disk can be used once the array is assembled * to add "hot spares". They must already have a superblock * written */ if (mddev->pers) { int err; if (!mddev->pers->hot_add_disk) { pr_warn("%s: personality does not support diskops!\n", mdname(mddev)); return -EINVAL; } if (mddev->persistent) rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); else rdev = md_import_device(dev, -1, -1); if (IS_ERR(rdev)) { pr_warn("md: md_import_device returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } /* set saved_raid_disk if appropriate */ if (!mddev->persistent) { if (info->state & (1<<MD_DISK_SYNC) && info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; clear_bit(Bitmap_sync, &rdev->flags); } else rdev->raid_disk = -1; rdev->saved_raid_disk = rdev->raid_disk; } else super_types[mddev->major_version]. validate_super(mddev, NULL/*freshest*/, rdev); if ((info->state & (1<<MD_DISK_SYNC)) && rdev->raid_disk != info->raid_disk) { /* This was a hot-add request, but events doesn't * match, so reject it. 
*/ export_rdev(rdev, mddev); return -EINVAL; } clear_bit(In_sync, &rdev->flags); /* just to be sure */ if (info->state & (1<<MD_DISK_WRITEMOSTLY)) set_bit(WriteMostly, &rdev->flags); else clear_bit(WriteMostly, &rdev->flags); if (info->state & (1<<MD_DISK_FAILFAST)) set_bit(FailFast, &rdev->flags); else clear_bit(FailFast, &rdev->flags); if (info->state & (1<<MD_DISK_JOURNAL)) { struct md_rdev *rdev2; bool has_journal = false; /* make sure no existing journal disk */ rdev_for_each(rdev2, mddev) { if (test_bit(Journal, &rdev2->flags)) { has_journal = true; break; } } if (has_journal || mddev->bitmap) { export_rdev(rdev, mddev); return -EBUSY; } set_bit(Journal, &rdev->flags); } /* * check whether the device shows up in other nodes */ if (mddev_is_clustered(mddev)) { if (info->state & (1 << MD_DISK_CANDIDATE)) set_bit(Candidate, &rdev->flags); else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { /* --add initiated by this node */ err = md_cluster_ops->add_new_disk(mddev, rdev); if (err) { export_rdev(rdev, mddev); return err; } } } rdev->raid_disk = -1; err = bind_rdev_to_array(rdev, mddev); if (err) export_rdev(rdev, mddev); if (mddev_is_clustered(mddev)) { if (info->state & (1 << MD_DISK_CANDIDATE)) { if (!err) { err = md_cluster_ops->new_disk_ack(mddev, err == 0); if (err) md_kick_rdev_from_array(rdev); } } else { if (err) md_cluster_ops->add_new_disk_cancel(mddev); else err = add_bound_rdev(rdev); } } else if (!err) err = add_bound_rdev(rdev); return err; } /* otherwise, md_add_new_disk is only allowed * for major_version==0 superblocks */ if (mddev->major_version != 0) { pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); return -EINVAL; } if (!(info->state & (1<<MD_DISK_FAULTY))) { int err; rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { pr_warn("md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); return PTR_ERR(rdev); } rdev->desc_nr = info->number; if (info->raid_disk < mddev->raid_disks) rdev->raid_disk = info->raid_disk; else rdev->raid_disk = -1; if (rdev->raid_disk < mddev->raid_disks) if (info->state & (1<<MD_DISK_SYNC)) set_bit(In_sync, &rdev->flags); if (info->state & (1<<MD_DISK_WRITEMOSTLY)) set_bit(WriteMostly, &rdev->flags); if (info->state & (1<<MD_DISK_FAILFAST)) set_bit(FailFast, &rdev->flags); if (!mddev->persistent) { pr_debug("md: nonpersistent superblock ...\n"); rdev->sb_start = bdev_nr_sectors(rdev->bdev); } else rdev->sb_start = calc_dev_sboffset(rdev); rdev->sectors = rdev->sb_start; err = bind_rdev_to_array(rdev, mddev); if (err) { export_rdev(rdev, mddev); return err; } } return 0; } static int hot_remove_disk(struct mddev *mddev, dev_t dev) { struct md_rdev *rdev; if (!mddev->pers) return -ENODEV; rdev = find_rdev(mddev, dev); if (!rdev) return -ENXIO; if (rdev->raid_disk < 0) goto kick_rdev; clear_bit(Blocked, &rdev->flags); remove_and_add_spares(mddev, rdev); if (rdev->raid_disk >= 0) goto busy; kick_rdev: if (mddev_is_clustered(mddev)) { if (md_cluster_ops->remove_disk(mddev, rdev)) goto busy; } md_kick_rdev_from_array(rdev); set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); md_new_event(); return 0; busy: pr_debug("md: cannot remove active disk %pg from %s ...\n", rdev->bdev, mdname(mddev)); return -EBUSY; } static int hot_add_disk(struct mddev *mddev, dev_t dev) { int err; struct md_rdev *rdev; if (!mddev->pers) return -ENODEV; if (mddev->major_version != 0) { pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", mdname(mddev)); return -EINVAL; } if 
(!mddev->pers->hot_add_disk) { pr_warn("%s: personality does not support diskops!\n", mdname(mddev)); return -EINVAL; } rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { pr_warn("md: error, md_import_device() returned %ld\n", PTR_ERR(rdev)); return -EINVAL; } if (mddev->persistent) rdev->sb_start = calc_dev_sboffset(rdev); else rdev->sb_start = bdev_nr_sectors(rdev->bdev); rdev->sectors = rdev->sb_start; if (test_bit(Faulty, &rdev->flags)) { pr_warn("md: can not hot-add faulty %pg disk to %s!\n", rdev->bdev, mdname(mddev)); err = -EINVAL; goto abort_export; } clear_bit(In_sync, &rdev->flags); rdev->desc_nr = -1; rdev->saved_raid_disk = -1; err = bind_rdev_to_array(rdev, mddev); if (err) goto abort_export; /* * The rest should better be atomic, we can have disk failures * noticed in interrupt contexts ... */ rdev->raid_disk = -1; set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); /* * Kick recovery, maybe this spare has to be added to the * array immediately. */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_new_event(); return 0; abort_export: export_rdev(rdev, mddev); return err; } static int set_bitmap_file(struct mddev *mddev, int fd) { int err = 0; if (mddev->pers) { if (!mddev->pers->quiesce || !mddev->thread) return -EBUSY; if (mddev->recovery || mddev->sync_thread) return -EBUSY; /* we should be able to change the bitmap.. */ } if (fd >= 0) { struct inode *inode; struct file *f; if (mddev->bitmap || mddev->bitmap_info.file) return -EEXIST; /* cannot add when bitmap is present */ if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) { pr_warn("%s: bitmap files not supported by this kernel\n", mdname(mddev)); return -EINVAL; } pr_warn("%s: using deprecated bitmap file support\n", mdname(mddev)); f = fget(fd); if (f == NULL) { pr_warn("%s: error: failed to get bitmap file\n", mdname(mddev)); return -EBADF; } inode = f->f_mapping->host; if (!S_ISREG(inode->i_mode)) { pr_warn("%s: error: bitmap file must be a regular file\n", mdname(mddev)); err = -EBADF; } else if (!(f->f_mode & FMODE_WRITE)) { pr_warn("%s: error: bitmap file must open for write\n", mdname(mddev)); err = -EBADF; } else if (atomic_read(&inode->i_writecount) != 1) { pr_warn("%s: error: bitmap file is already in use\n", mdname(mddev)); err = -EBUSY; } if (err) { fput(f); return err; } mddev->bitmap_info.file = f; mddev->bitmap_info.offset = 0; /* file overrides offset */ } else if (mddev->bitmap == NULL) return -ENOENT; /* cannot remove what isn't there */ err = 0; if (mddev->pers) { if (fd >= 0) { err = mddev->bitmap_ops->create(mddev, -1); if (!err) err = mddev->bitmap_ops->load(mddev); if (err) { mddev->bitmap_ops->destroy(mddev); fd = -1; } } else if (fd < 0) { mddev->bitmap_ops->destroy(mddev); } } if (fd < 0) { struct file *f = mddev->bitmap_info.file; if (f) { spin_lock(&mddev->lock); mddev->bitmap_info.file = NULL; spin_unlock(&mddev->lock); fput(f); } } return err; } /* * md_set_array_info is used two different ways * The original usage is when creating a new array. * In this usage, raid_disks is > 0 and it together with * level, size, not_persistent,layout,chunksize determine the * shape of the array. * This will always create an array with a type-0.90.0 superblock. * The newer usage is when assembling an array. * In this case raid_disks will be 0, and the major_version field is * use to determine which style super-blocks are to be found on the devices. * The minor and patch _version numbers are also kept incase the * super_block handler wishes to interpret them. 
*/ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) { if (info->raid_disks == 0) { /* just setting version number for superblock loading */ if (info->major_version < 0 || info->major_version >= ARRAY_SIZE(super_types) || super_types[info->major_version].name == NULL) { /* maybe try to auto-load a module? */ pr_warn("md: superblock version %d not known\n", info->major_version); return -EINVAL; } mddev->major_version = info->major_version; mddev->minor_version = info->minor_version; mddev->patch_version = info->patch_version; mddev->persistent = !info->not_persistent; /* ensure mddev_put doesn't delete this now that there * is some minimal configuration. */ mddev->ctime = ktime_get_real_seconds(); return 0; } mddev->major_version = MD_MAJOR_VERSION; mddev->minor_version = MD_MINOR_VERSION; mddev->patch_version = MD_PATCHLEVEL_VERSION; mddev->ctime = ktime_get_real_seconds(); mddev->level = info->level; mddev->clevel[0] = 0; mddev->dev_sectors = 2 * (sector_t)info->size; mddev->raid_disks = info->raid_disks; /* don't set md_minor, it is determined by which /dev/md* was * openned */ if (info->state & (1<<MD_SB_CLEAN)) mddev->recovery_cp = MaxSector; else mddev->recovery_cp = 0; mddev->persistent = ! info->not_persistent; mddev->external = 0; mddev->layout = info->layout; if (mddev->level == 0) /* Cannot trust RAID0 layout info here */ mddev->layout = -1; mddev->chunk_sectors = info->chunk_size >> 9; if (mddev->persistent) { mddev->max_disks = MD_SB_DISKS; mddev->flags = 0; mddev->sb_flags = 0; } set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); mddev->bitmap_info.offset = 0; mddev->reshape_position = MaxSector; /* * Generate a 128 bit UUID */ get_random_bytes(mddev->uuid, 16); mddev->new_level = mddev->level; mddev->new_chunk_sectors = mddev->chunk_sectors; mddev->new_layout = mddev->layout; mddev->delta_disks = 0; mddev->reshape_backwards = 0; return 0; } void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) { lockdep_assert_held(&mddev->reconfig_mutex); if (mddev->external_size) return; mddev->array_sectors = array_sectors; } EXPORT_SYMBOL(md_set_array_sectors); static int update_size(struct mddev *mddev, sector_t num_sectors) { struct md_rdev *rdev; int rv; int fit = (num_sectors == 0); sector_t old_dev_sectors = mddev->dev_sectors; if (mddev->pers->resize == NULL) return -EINVAL; /* The "num_sectors" is the number of sectors of each device that * is used. This can only make sense for arrays with redundancy. * linear and raid0 always use whatever space is available. We can only * consider changing this number if no resync or reconstruction is * happening, and if the new size is acceptable. It must fit before the * sb_start or, if that is <data_offset, it must fit before the size * of each device. If num_sectors is zero, we find the largest size * that fits. 
*/ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; if (!md_is_rdwr(mddev)) return -EROFS; rdev_for_each(rdev, mddev) { sector_t avail = rdev->sectors; if (fit && (num_sectors == 0 || num_sectors > avail)) num_sectors = avail; if (avail < num_sectors) return -ENOSPC; } rv = mddev->pers->resize(mddev, num_sectors); if (!rv) { if (mddev_is_clustered(mddev)) md_cluster_ops->update_size(mddev, old_dev_sectors); else if (!mddev_is_dm(mddev)) set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); } return rv; } static int update_raid_disks(struct mddev *mddev, int raid_disks) { int rv; struct md_rdev *rdev; /* change the number of raid disks */ if (mddev->pers->check_reshape == NULL) return -EINVAL; if (!md_is_rdwr(mddev)) return -EROFS; if (raid_disks <= 0 || (mddev->max_disks && raid_disks >= mddev->max_disks)) return -EINVAL; if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || mddev->reshape_position != MaxSector) return -EBUSY; rdev_for_each(rdev, mddev) { if (mddev->raid_disks < raid_disks && rdev->data_offset < rdev->new_data_offset) return -EINVAL; if (mddev->raid_disks > raid_disks && rdev->data_offset > rdev->new_data_offset) return -EINVAL; } mddev->delta_disks = raid_disks - mddev->raid_disks; if (mddev->delta_disks < 0) mddev->reshape_backwards = 1; else if (mddev->delta_disks > 0) mddev->reshape_backwards = 0; rv = mddev->pers->check_reshape(mddev); if (rv < 0) { mddev->delta_disks = 0; mddev->reshape_backwards = 0; } return rv; } /* * update_array_info is used to change the configuration of an * on-line array. * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size * fields in the info are checked against the array. * Any differences that cannot be handled will cause an error. * Normally, only one change can be managed at a time. */ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) { int rv = 0; int cnt = 0; int state = 0; /* calculate expected state,ignoring low bits */ if (mddev->bitmap && mddev->bitmap_info.offset) state |= (1 << MD_SB_BITMAP_PRESENT); if (mddev->major_version != info->major_version || mddev->minor_version != info->minor_version || /* mddev->patch_version != info->patch_version || */ mddev->ctime != info->ctime || mddev->level != info->level || /* mddev->layout != info->layout || */ mddev->persistent != !info->not_persistent || mddev->chunk_sectors != info->chunk_size >> 9 || /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ ((state^info->state) & 0xfffffe00) ) return -EINVAL; /* Check there is only one change */ if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) cnt++; if (mddev->raid_disks != info->raid_disks) cnt++; if (mddev->layout != info->layout) cnt++; if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; if (cnt == 0) return 0; if (cnt > 1) return -EINVAL; if (mddev->layout != info->layout) { /* Change layout * we don't need to do anything at the md level, the * personality will take care of it all. 
*/ if (mddev->pers->check_reshape == NULL) return -EINVAL; else { mddev->new_layout = info->layout; rv = mddev->pers->check_reshape(mddev); if (rv) mddev->new_layout = mddev->layout; return rv; } } if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) rv = update_size(mddev, (sector_t)info->size * 2); if (mddev->raid_disks != info->raid_disks) rv = update_raid_disks(mddev, info->raid_disks); if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { rv = -EINVAL; goto err; } if (mddev->recovery || mddev->sync_thread) { rv = -EBUSY; goto err; } if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { /* add the bitmap */ if (mddev->bitmap) { rv = -EEXIST; goto err; } if (mddev->bitmap_info.default_offset == 0) { rv = -EINVAL; goto err; } mddev->bitmap_info.offset = mddev->bitmap_info.default_offset; mddev->bitmap_info.space = mddev->bitmap_info.default_space; rv = mddev->bitmap_ops->create(mddev, -1); if (!rv) rv = mddev->bitmap_ops->load(mddev); if (rv) mddev->bitmap_ops->destroy(mddev); } else { struct md_bitmap_stats stats; rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); if (rv) goto err; if (stats.file) { rv = -EINVAL; goto err; } if (mddev->bitmap_info.nodes) { /* hold PW on all the bitmap lock */ if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) { pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); rv = -EPERM; md_cluster_ops->unlock_all_bitmaps(mddev); goto err; } mddev->bitmap_info.nodes = 0; md_cluster_ops->leave(mddev); module_put(md_cluster_mod); mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; } mddev->bitmap_ops->destroy(mddev); mddev->bitmap_info.offset = 0; } } md_update_sb(mddev, 1); return rv; err: return rv; } static int set_disk_faulty(struct mddev *mddev, dev_t dev) { struct md_rdev *rdev; int err = 0; if (mddev->pers == NULL) return -ENODEV; rcu_read_lock(); rdev = md_find_rdev_rcu(mddev, dev); if (!rdev) err = -ENODEV; else { md_error(mddev, rdev); if (test_bit(MD_BROKEN, &mddev->flags)) err = -EBUSY; } rcu_read_unlock(); return err; } /* * We have a problem here : there is no easy way to give a CHS * virtual geometry. We currently pretend that we have a 2 heads * 4 sectors (with a BIG number of cylinders...). This drives * dosfs just mad... ;-) */ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct mddev *mddev = bdev->bd_disk->private_data; geo->heads = 2; geo->sectors = 4; geo->cylinders = mddev->array_sectors / 8; return 0; } static inline int md_ioctl_valid(unsigned int cmd) { switch (cmd) { case GET_ARRAY_INFO: case GET_DISK_INFO: case RAID_VERSION: return 0; case ADD_NEW_DISK: case GET_BITMAP_FILE: case HOT_ADD_DISK: case HOT_REMOVE_DISK: case RESTART_ARRAY_RW: case RUN_ARRAY: case SET_ARRAY_INFO: case SET_BITMAP_FILE: case SET_DISK_FAULTY: case STOP_ARRAY: case STOP_ARRAY_RO: case CLUSTERED_DISK_NACK: if (!capable(CAP_SYS_ADMIN)) return -EACCES; return 0; default: return -ENOTTY; } } static bool md_ioctl_need_suspend(unsigned int cmd) { switch (cmd) { case ADD_NEW_DISK: case HOT_ADD_DISK: case HOT_REMOVE_DISK: case SET_BITMAP_FILE: case SET_ARRAY_INFO: return true; default: return false; } } static int __md_set_array_info(struct mddev *mddev, void __user *argp) { mdu_array_info_t info; int err; if (!argp) memset(&info, 0, sizeof(info)); else if (copy_from_user(&info, argp, sizeof(info))) return -EFAULT; if (mddev->pers) { err = update_array_info(mddev, &info); if (err) pr_warn("md: couldn't update array info. 
%d\n", err); return err; } if (!list_empty(&mddev->disks)) { pr_warn("md: array %s already has disks!\n", mdname(mddev)); return -EBUSY; } if (mddev->raid_disks) { pr_warn("md: array %s already initialised!\n", mdname(mddev)); return -EBUSY; } err = md_set_array_info(mddev, &info); if (err) pr_warn("md: couldn't set array info. %d\n", err); return err; } static int md_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { int err = 0; void __user *argp = (void __user *)arg; struct mddev *mddev = NULL; err = md_ioctl_valid(cmd); if (err) return err; /* * Commands dealing with the RAID driver but not any * particular array: */ if (cmd == RAID_VERSION) return get_version(argp); /* * Commands creating/starting a new array: */ mddev = bdev->bd_disk->private_data; /* Some actions do not requires the mutex */ switch (cmd) { case GET_ARRAY_INFO: if (!mddev->raid_disks && !mddev->external) return -ENODEV; return get_array_info(mddev, argp); case GET_DISK_INFO: if (!mddev->raid_disks && !mddev->external) return -ENODEV; return get_disk_info(mddev, argp); case SET_DISK_FAULTY: return set_disk_faulty(mddev, new_decode_dev(arg)); case GET_BITMAP_FILE: return get_bitmap_file(mddev, argp); } if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { /* Need to flush page cache, and ensure no-one else opens * and writes */ err = mddev_set_closing_and_sync_blockdev(mddev, 1); if (err) return err; } if (!md_is_rdwr(mddev)) flush_work(&mddev->sync_work); err = md_ioctl_need_suspend(cmd) ? mddev_suspend_and_lock(mddev) : mddev_lock(mddev); if (err) { pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", err, cmd); goto out; } if (cmd == SET_ARRAY_INFO) { err = __md_set_array_info(mddev, argp); goto unlock; } /* * Commands querying/configuring an existing array: */ /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ if ((!mddev->raid_disks && !mddev->external) && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE && cmd != GET_BITMAP_FILE) { err = -ENODEV; goto unlock; } /* * Commands even a read-only array can execute: */ switch (cmd) { case RESTART_ARRAY_RW: err = restart_array(mddev); goto unlock; case STOP_ARRAY: err = do_md_stop(mddev, 0); goto unlock; case STOP_ARRAY_RO: if (mddev->pers) err = md_set_readonly(mddev); goto unlock; case HOT_REMOVE_DISK: err = hot_remove_disk(mddev, new_decode_dev(arg)); goto unlock; case ADD_NEW_DISK: /* We can support ADD_NEW_DISK on read-only arrays * only if we are re-adding a preexisting device. * So require mddev->pers and MD_DISK_SYNC. */ if (mddev->pers) { mdu_disk_info_t info; if (copy_from_user(&info, argp, sizeof(info))) err = -EFAULT; else if (!(info.state & (1<<MD_DISK_SYNC))) /* Need to clear read-only for this */ break; else err = md_add_new_disk(mddev, &info); goto unlock; } break; } /* * The remaining ioctls are changing the state of the * superblock, so we do not allow them on read-only arrays. */ if (!md_is_rdwr(mddev) && mddev->pers) { if (mddev->ro != MD_AUTO_READ) { err = -EROFS; goto unlock; } mddev->ro = MD_RDWR; sysfs_notify_dirent_safe(mddev->sysfs_state); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); /* mddev_unlock will wake thread */ /* If a device failed while we were read-only, we * need to make sure the metadata is updated now. 
*/ if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { mddev_unlock(mddev); wait_event(mddev->sb_wait, !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); } } switch (cmd) { case ADD_NEW_DISK: { mdu_disk_info_t info; if (copy_from_user(&info, argp, sizeof(info))) err = -EFAULT; else err = md_add_new_disk(mddev, &info); goto unlock; } case CLUSTERED_DISK_NACK: if (mddev_is_clustered(mddev)) md_cluster_ops->new_disk_ack(mddev, false); else err = -EINVAL; goto unlock; case HOT_ADD_DISK: err = hot_add_disk(mddev, new_decode_dev(arg)); goto unlock; case RUN_ARRAY: err = do_md_run(mddev); goto unlock; case SET_BITMAP_FILE: err = set_bitmap_file(mddev, (int)arg); goto unlock; default: err = -EINVAL; goto unlock; } unlock: if (mddev->hold_active == UNTIL_IOCTL && err != -EINVAL) mddev->hold_active = 0; md_ioctl_need_suspend(cmd) ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev); out: if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY)) clear_bit(MD_CLOSING, &mddev->flags); return err; } #ifdef CONFIG_COMPAT static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { switch (cmd) { case HOT_REMOVE_DISK: case HOT_ADD_DISK: case SET_DISK_FAULTY: case SET_BITMAP_FILE: /* These take in integer arg, do not convert */ break; default: arg = (unsigned long)compat_ptr(arg); break; } return md_ioctl(bdev, mode, cmd, arg); } #endif /* CONFIG_COMPAT */ static int md_set_read_only(struct block_device *bdev, bool ro) { struct mddev *mddev = bdev->bd_disk->private_data; int err; err = mddev_lock(mddev); if (err) return err; if (!mddev->raid_disks && !mddev->external) { err = -ENODEV; goto out_unlock; } /* * Transitioning to read-auto need only happen for arrays that call * md_write_start and which are not ready for writes yet. 
*/ if (!ro && mddev->ro == MD_RDONLY && mddev->pers) { err = restart_array(mddev); if (err) goto out_unlock; mddev->ro = MD_AUTO_READ; } out_unlock: mddev_unlock(mddev); return err; } static int md_open(struct gendisk *disk, blk_mode_t mode) { struct mddev *mddev; int err; spin_lock(&all_mddevs_lock); mddev = mddev_get(disk->private_data); spin_unlock(&all_mddevs_lock); if (!mddev) return -ENODEV; err = mutex_lock_interruptible(&mddev->open_mutex); if (err) goto out; err = -ENODEV; if (test_bit(MD_CLOSING, &mddev->flags)) goto out_unlock; atomic_inc(&mddev->openers); mutex_unlock(&mddev->open_mutex); disk_check_media_change(disk); return 0; out_unlock: mutex_unlock(&mddev->open_mutex); out: mddev_put(mddev); return err; } static void md_release(struct gendisk *disk) { struct mddev *mddev = disk->private_data; BUG_ON(!mddev); atomic_dec(&mddev->openers); mddev_put(mddev); } static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) { struct mddev *mddev = disk->private_data; unsigned int ret = 0; if (mddev->changed) ret = DISK_EVENT_MEDIA_CHANGE; mddev->changed = 0; return ret; } static void md_free_disk(struct gendisk *disk) { struct mddev *mddev = disk->private_data; mddev_free(mddev); } const struct block_device_operations md_fops = { .owner = THIS_MODULE, .submit_bio = md_submit_bio, .open = md_open, .release = md_release, .ioctl = md_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, .check_events = md_check_events, .set_read_only = md_set_read_only, .free_disk = md_free_disk, }; static int md_thread(void *arg) { struct md_thread *thread = arg; /* * md_thread is a 'system-thread', it's priority should be very * high. We avoid resource deadlocks individually in each * raid personality. (RAID5 does preallocation) We also use RR and * the very same RT priority as kswapd, thus we will never get * into a priority inversion deadlock. * * we definitely have to have equal or higher priority than * bdflush, otherwise bdflush will deadlock if there are too * many dirty RAID5 blocks. */ allow_signal(SIGKILL); while (!kthread_should_stop()) { /* We need to wait INTERRUPTIBLE so that * we don't add to the load-average. 
* That means we need to be sure no signals are * pending */ if (signal_pending(current)) flush_signals(current); wait_event_interruptible_timeout (thread->wqueue, test_bit(THREAD_WAKEUP, &thread->flags) || kthread_should_stop() || kthread_should_park(), thread->timeout); clear_bit(THREAD_WAKEUP, &thread->flags); if (kthread_should_park()) kthread_parkme(); if (!kthread_should_stop()) thread->run(thread); } return 0; } static void md_wakeup_thread_directly(struct md_thread __rcu *thread) { struct md_thread *t; rcu_read_lock(); t = rcu_dereference(thread); if (t) wake_up_process(t->tsk); rcu_read_unlock(); } void md_wakeup_thread(struct md_thread __rcu *thread) { struct md_thread *t; rcu_read_lock(); t = rcu_dereference(thread); if (t) { pr_debug("md: waking up MD thread %s.\n", t->tsk->comm); set_bit(THREAD_WAKEUP, &t->flags); if (wq_has_sleeper(&t->wqueue)) wake_up(&t->wqueue); } rcu_read_unlock(); } EXPORT_SYMBOL(md_wakeup_thread); struct md_thread *md_register_thread(void (*run) (struct md_thread *), struct mddev *mddev, const char *name) { struct md_thread *thread; thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); if (!thread) return NULL; init_waitqueue_head(&thread->wqueue); thread->run = run; thread->mddev = mddev; thread->timeout = MAX_SCHEDULE_TIMEOUT; thread->tsk = kthread_run(md_thread, thread, "%s_%s", mdname(thread->mddev), name); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; } return thread; } EXPORT_SYMBOL(md_register_thread); void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp) { struct md_thread *thread = rcu_dereference_protected(*threadp, lockdep_is_held(&mddev->reconfig_mutex)); if (!thread) return; rcu_assign_pointer(*threadp, NULL); synchronize_rcu(); pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); kthread_stop(thread->tsk); kfree(thread); } EXPORT_SYMBOL(md_unregister_thread); void md_error(struct mddev *mddev, struct md_rdev *rdev) { if (!rdev || test_bit(Faulty, &rdev->flags)) return; if (!mddev->pers || !mddev->pers->error_handler) return; mddev->pers->error_handler(mddev, rdev); if (mddev->pers->level == 0) return; if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); sysfs_notify_dirent_safe(rdev->sysfs_state); set_bit(MD_RECOVERY_INTR, &mddev->recovery); if (!test_bit(MD_BROKEN, &mddev->flags)) { set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } if (mddev->event_work.func) queue_work(md_misc_wq, &mddev->event_work); md_new_event(); } EXPORT_SYMBOL(md_error); /* seq_file implementation /proc/mdstat */ static void status_unused(struct seq_file *seq) { int i = 0; struct md_rdev *rdev; seq_printf(seq, "unused devices: "); list_for_each_entry(rdev, &pending_raid_disks, same_set) { i++; seq_printf(seq, "%pg ", rdev->bdev); } if (!i) seq_printf(seq, "<none>"); seq_printf(seq, "\n"); } static void status_personalities(struct seq_file *seq) { struct md_personality *pers; seq_puts(seq, "Personalities : "); spin_lock(&pers_lock); list_for_each_entry(pers, &pers_list, list) seq_printf(seq, "[%s] ", pers->name); spin_unlock(&pers_lock); seq_puts(seq, "\n"); } static int status_resync(struct seq_file *seq, struct mddev *mddev) { sector_t max_sectors, resync, res; unsigned long dt, db = 0; sector_t rt, curr_mark_cnt, resync_mark_cnt; int scale, recovery_active; unsigned int per_milli; if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) max_sectors = mddev->resync_max_sectors; 
else max_sectors = mddev->dev_sectors; resync = mddev->curr_resync; if (resync < MD_RESYNC_ACTIVE) { if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) /* Still cleaning up */ resync = max_sectors; } else if (resync > max_sectors) { resync = max_sectors; } else { res = atomic_read(&mddev->recovery_active); /* * Resync has started, but the subtraction has overflowed or * yielded one of the special values. Force it to active to * ensure the status reports an active resync. */ if (resync < res || resync - res < MD_RESYNC_ACTIVE) resync = MD_RESYNC_ACTIVE; else resync -= res; } if (resync == MD_RESYNC_NONE) { if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { struct md_rdev *rdev; rdev_for_each(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && rdev->recovery_offset != MaxSector && rdev->recovery_offset) { seq_printf(seq, "\trecover=REMOTE"); return 1; } if (mddev->reshape_position != MaxSector) seq_printf(seq, "\treshape=REMOTE"); else seq_printf(seq, "\tresync=REMOTE"); return 1; } if (mddev->recovery_cp < MaxSector) { seq_printf(seq, "\tresync=PENDING"); return 1; } return 0; } if (resync < MD_RESYNC_ACTIVE) { seq_printf(seq, "\tresync=DELAYED"); return 1; } WARN_ON(max_sectors == 0); /* Pick 'scale' such that (resync>>scale)*1000 will fit * in a sector_t, and (max_sectors>>scale) will fit in a * u32, as those are the requirements for sector_div. * Thus 'scale' must be at least 10 */ scale = 10; if (sizeof(sector_t) > sizeof(unsigned long)) { while ( max_sectors/2 > (1ULL<<(scale+32))) scale++; } res = (resync>>scale)*1000; sector_div(res, (u32)((max_sectors>>scale)+1)); per_milli = res; { int i, x = per_milli/50, y = 20-x; seq_printf(seq, "["); for (i = 0; i < x; i++) seq_printf(seq, "="); seq_printf(seq, ">"); for (i = 0; i < y; i++) seq_printf(seq, "."); seq_printf(seq, "] "); } seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? "reshape" : (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? "check" : (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery"))), per_milli/10, per_milli % 10, (unsigned long long) resync/2, (unsigned long long) max_sectors/2); /* * dt: time from mark until now * db: blocks written from mark until now * rt: remaining time * * rt is a sector_t, which is always 64bit now. We are keeping * the original algorithm, but it is not really necessary. * * Original algorithm: * So we divide before multiply in case it is 32bit and close * to the limit. * We scale the divisor (db) by 32 to avoid losing precision * near the end of resync when the number of remaining sectors * is close to 'db'. * We then divide rt by 32 after multiplying by db to compensate. * The '+1' avoids division by zero if db is very small. 
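 */

/*
 * [Illustrative sketch -- not part of md.c]
 * The same progress/ETA arithmetic as status_resync(), restated as a
 * standalone function with 64-bit userspace types: "resync", "max_sectors"
 * and "db" are in 512-byte sectors, "dt" is in seconds.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void example_resync_progress(uint64_t resync, uint64_t max_sectors,
				    uint64_t db, uint64_t dt)
{
	unsigned int scale = 10, per_milli;
	uint64_t res, rt;

	if (!dt)
		dt = 1;

	/* Shift both terms so (resync >> scale) * 1000 cannot overflow and
	 * the divisor fits in 32 bits, as required by sector_div(). */
	while (max_sectors / 2 > (1ULL << (scale + 32)))
		scale++;
	res = (resync >> scale) * 1000;
	per_milli = res / ((max_sectors >> scale) + 1);

	/* ETA in seconds: remaining sectors divided by the observed rate
	 * db/dt, with db scaled by 32 (and compensated afterwards) to keep
	 * precision near the end of the resync. */
	rt = max_sectors - resync;
	rt /= db / 32 + 1;
	rt *= dt;
	rt >>= 5;

	printf("%u.%u%% finish=%llu.%llumin speed=%lluK/sec\n",
	       per_milli / 10, per_milli % 10,
	       (unsigned long long)(rt / 60),
	       (unsigned long long)((rt % 60) / 6),
	       (unsigned long long)(db / 2 / dt));
}
#endif

/*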
*/ dt = ((jiffies - mddev->resync_mark) / HZ); if (!dt) dt++; curr_mark_cnt = mddev->curr_mark_cnt; recovery_active = atomic_read(&mddev->recovery_active); resync_mark_cnt = mddev->resync_mark_cnt; if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) db = curr_mark_cnt - (recovery_active + resync_mark_cnt); rt = max_sectors - resync; /* number of remaining sectors */ rt = div64_u64(rt, db/32+1); rt *= dt; rt >>= 5; seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, ((unsigned long)rt % 60)/6); seq_printf(seq, " speed=%ldK/sec", db/2/dt); return 1; } static void *md_seq_start(struct seq_file *seq, loff_t *pos) __acquires(&all_mddevs_lock) { seq->poll_event = atomic_read(&md_event_count); spin_lock(&all_mddevs_lock); return seq_list_start_head(&all_mddevs, *pos); } static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_list_next(v, &all_mddevs, pos); } static void md_seq_stop(struct seq_file *seq, void *v) __releases(&all_mddevs_lock) { spin_unlock(&all_mddevs_lock); } static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev) { struct md_bitmap_stats stats; unsigned long used_pages; unsigned long chunk_kb; int err; err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); if (err) return; chunk_kb = mddev->bitmap_info.chunksize >> 10; used_pages = stats.pages - stats.missing_pages; seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk", used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10), chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, chunk_kb ? "KB" : "B"); if (stats.file) { seq_puts(seq, ", file: "); seq_file_path(seq, stats.file, " \t\n"); } seq_putc(seq, '\n'); } static int md_seq_show(struct seq_file *seq, void *v) { struct mddev *mddev; sector_t sectors; struct md_rdev *rdev; if (v == &all_mddevs) { status_personalities(seq); if (list_empty(&all_mddevs)) status_unused(seq); return 0; } mddev = list_entry(v, struct mddev, all_mddevs); if (!mddev_get(mddev)) return 0; spin_unlock(&all_mddevs_lock); spin_lock(&mddev->lock); if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { seq_printf(seq, "%s : ", mdname(mddev)); if (mddev->pers) { if (test_bit(MD_BROKEN, &mddev->flags)) seq_printf(seq, "broken"); else seq_printf(seq, "active"); if (mddev->ro == MD_RDONLY) seq_printf(seq, " (read-only)"); if (mddev->ro == MD_AUTO_READ) seq_printf(seq, " (auto-read-only)"); seq_printf(seq, " %s", mddev->pers->name); } else { seq_printf(seq, "inactive"); } sectors = 0; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr); if (test_bit(WriteMostly, &rdev->flags)) seq_printf(seq, "(W)"); if (test_bit(Journal, &rdev->flags)) seq_printf(seq, "(J)"); if (test_bit(Faulty, &rdev->flags)) { seq_printf(seq, "(F)"); continue; } if (rdev->raid_disk < 0) seq_printf(seq, "(S)"); /* spare */ if (test_bit(Replacement, &rdev->flags)) seq_printf(seq, "(R)"); sectors += rdev->sectors; } rcu_read_unlock(); if (!list_empty(&mddev->disks)) { if (mddev->pers) seq_printf(seq, "\n %llu blocks", (unsigned long long) mddev->array_sectors / 2); else seq_printf(seq, "\n %llu blocks", (unsigned long long)sectors / 2); } if (mddev->persistent) { if (mddev->major_version != 0 || mddev->minor_version != 90) { seq_printf(seq," super %d.%d", mddev->major_version, mddev->minor_version); } } else if (mddev->external) seq_printf(seq, " super external:%s", mddev->metadata_type); else seq_printf(seq, " super non-persistent"); if (mddev->pers) { mddev->pers->status(seq, mddev); seq_printf(seq, "\n "); 
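/*
 * [Illustrative sketch -- not part of md.c]
 * The per-device markers printed above -- "(W)", "(J)", "(F)", "(S)",
 * "(R)" -- are what userspace tools look for when parsing /proc/mdstat.
 * A trivial standalone helper that counts failed members on one line:
 */
#if 0
#include <string.h>

static int example_count_failed(const char *mdstat_line)
{
	int failed = 0;
	const char *p = mdstat_line;

	while ((p = strstr(p, "(F)")) != NULL) {
		failed++;
		p += 3;
	}
	return failed;
}
#endif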
if (mddev->pers->sync_request) { if (status_resync(seq, mddev)) seq_printf(seq, "\n "); } } else seq_printf(seq, "\n "); md_bitmap_status(seq, mddev); seq_printf(seq, "\n"); } spin_unlock(&mddev->lock); spin_lock(&all_mddevs_lock); if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs)) status_unused(seq); if (atomic_dec_and_test(&mddev->active)) __mddev_put(mddev); return 0; } static const struct seq_operations md_seq_ops = { .start = md_seq_start, .next = md_seq_next, .stop = md_seq_stop, .show = md_seq_show, }; static int md_seq_open(struct inode *inode, struct file *file) { struct seq_file *seq; int error; error = seq_open(file, &md_seq_ops); if (error) return error; seq = file->private_data; seq->poll_event = atomic_read(&md_event_count); return error; } static int md_unloading; static __poll_t mdstat_poll(struct file *filp, poll_table *wait) { struct seq_file *seq = filp->private_data; __poll_t mask; if (md_unloading) return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; poll_wait(filp, &md_event_waiters, wait); /* always allow read */ mask = EPOLLIN | EPOLLRDNORM; if (seq->poll_event != atomic_read(&md_event_count)) mask |= EPOLLERR | EPOLLPRI; return mask; } static const struct proc_ops mdstat_proc_ops = { .proc_open = md_seq_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release, .proc_poll = mdstat_poll, }; int register_md_personality(struct md_personality *p) { pr_debug("md: %s personality registered for level %d\n", p->name, p->level); spin_lock(&pers_lock); list_add_tail(&p->list, &pers_list); spin_unlock(&pers_lock); return 0; } EXPORT_SYMBOL(register_md_personality); int unregister_md_personality(struct md_personality *p) { pr_debug("md: %s personality unregistered\n", p->name); spin_lock(&pers_lock); list_del_init(&p->list); spin_unlock(&pers_lock); return 0; } EXPORT_SYMBOL(unregister_md_personality); int register_md_cluster_operations(const struct md_cluster_operations *ops, struct module *module) { int ret = 0; spin_lock(&pers_lock); if (md_cluster_ops != NULL) ret = -EALREADY; else { md_cluster_ops = ops; md_cluster_mod = module; } spin_unlock(&pers_lock); return ret; } EXPORT_SYMBOL(register_md_cluster_operations); int unregister_md_cluster_operations(void) { spin_lock(&pers_lock); md_cluster_ops = NULL; spin_unlock(&pers_lock); return 0; } EXPORT_SYMBOL(unregister_md_cluster_operations); int md_setup_cluster(struct mddev *mddev, int nodes) { int ret; if (!md_cluster_ops) request_module("md-cluster"); spin_lock(&pers_lock); /* ensure module won't be unloaded */ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) { pr_warn("can't find md-cluster module or get its reference.\n"); spin_unlock(&pers_lock); return -ENOENT; } spin_unlock(&pers_lock); ret = md_cluster_ops->join(mddev, nodes); if (!ret) mddev->safemode_delay = 0; return ret; } void md_cluster_stop(struct mddev *mddev) { if (!md_cluster_ops) return; md_cluster_ops->leave(mddev); module_put(md_cluster_mod); } static int is_mddev_idle(struct mddev *mddev, int init) { struct md_rdev *rdev; int idle; int curr_events; idle = 1; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { struct gendisk *disk = rdev->bdev->bd_disk; if (!init && !blk_queue_io_stat(disk->queue)) continue; curr_events = (int)part_stat_read_accum(disk->part0, sectors) - atomic_read(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. 
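 */

/*
 * [Illustrative sketch -- not part of md.c]
 * Waiting for array events from userspace: md_seq_open()/md_seq_start()
 * above latch md_event_count into seq->poll_event, and mdstat_poll()
 * reports EPOLLERR|EPOLLPRI once the count has moved on, so a blocking
 * poll() on /proc/mdstat wakes up whenever the array state changes.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static int example_wait_for_md_event(void)
{
	char buf[4096];
	struct pollfd pfd = { .events = POLLPRI };

	pfd.fd = open("/proc/mdstat", O_RDONLY);
	if (pfd.fd < 0)
		return -1;
	/* Reading once both fetches the current state and (re)latches the
	 * event counter via md_seq_start(). */
	if (read(pfd.fd, buf, sizeof(buf)) < 0) {
		close(pfd.fd);
		return -1;
	}
	if (poll(&pfd, 1, -1) < 0) {	/* blocks until the next md event */
		close(pfd.fd);
		return -1;
	}
	close(pfd.fd);
	return 0;
}
#endif

/*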
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == MD_RDONLY);
	if (mddev->ro == MD_AUTO_READ) {
		/* need to switch to read/write */
		mddev->ro = MD_RDWR;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	rcu_read_lock();
	percpu_ref_get(&mddev->writes_pending);
	smp_mb(); /* Match smp_mb in set_in_sync() */
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	rcu_read_unlock();
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	if (!mddev->has_superblocks)
		return;
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
}
EXPORT_SYMBOL(md_write_start);

/* md_write_inc can only be called when md_write_start() has
 * already been called at least once for the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
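 */

/*
 * [Illustrative sketch -- not part of md.c]
 * How a hypothetical personality would pair these helpers when it splits
 * one incoming write in two; submit_part() is an assumed helper, not md
 * API, and only the calls shown here come from this file.
 */
#if 0
static void example_make_write_request(struct mddev *mddev, struct bio *bio)
{
	/* May block until the superblock has been marked 'active'. */
	md_write_start(mddev, bio);

	/* The first part is already covered by md_write_start(); each extra
	 * part takes one more reference, which is safe under a spinlock. */
	md_write_inc(mddev, bio);

	submit_part(mddev, bio, 0);		/* assumed helper */
	submit_part(mddev, bio, 1);		/* assumed helper */

	/* Each part's completion path then calls md_write_end(mddev) exactly
	 * once, dropping one writes_pending reference per part. */
}
#endif

/*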
*/ void md_write_inc(struct mddev *mddev, struct bio *bi) { if (bio_data_dir(bi) != WRITE) return; WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev)); percpu_ref_get(&mddev->writes_pending); } EXPORT_SYMBOL(md_write_inc); void md_write_end(struct mddev *mddev) { percpu_ref_put(&mddev->writes_pending); if (mddev->safemode == 2) md_wakeup_thread(mddev->thread); else if (mddev->safemode_delay) /* The roundup() ensures this only performs locking once * every ->safemode_delay jiffies */ mod_timer(&mddev->safemode_timer, roundup(jiffies, mddev->safemode_delay) + mddev->safemode_delay); } EXPORT_SYMBOL(md_write_end); /* This is used by raid0 and raid10 */ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size) { struct bio *discard_bio = NULL; if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, &discard_bio) || !discard_bio) return; bio_chain(discard_bio, bio); bio_clone_blkg_association(discard_bio, bio); mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector); submit_bio_noacct(discard_bio); } EXPORT_SYMBOL_GPL(md_submit_discard_bio); static void md_end_clone_io(struct bio *bio) { struct md_io_clone *md_io_clone = bio->bi_private; struct bio *orig_bio = md_io_clone->orig_bio; struct mddev *mddev = md_io_clone->mddev; if (bio->bi_status && !orig_bio->bi_status) orig_bio->bi_status = bio->bi_status; if (md_io_clone->start_time) bio_end_io_acct(orig_bio, md_io_clone->start_time); bio_put(bio); bio_endio(orig_bio); percpu_ref_put(&mddev->active_io); } static void md_clone_bio(struct mddev *mddev, struct bio **bio) { struct block_device *bdev = (*bio)->bi_bdev; struct md_io_clone *md_io_clone; struct bio *clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set); md_io_clone = container_of(clone, struct md_io_clone, bio_clone); md_io_clone->orig_bio = *bio; md_io_clone->mddev = mddev; if (blk_queue_io_stat(bdev->bd_disk->queue)) md_io_clone->start_time = bio_start_io_acct(*bio); clone->bi_end_io = md_end_clone_io; clone->bi_private = md_io_clone; *bio = clone; } void md_account_bio(struct mddev *mddev, struct bio **bio) { percpu_ref_get(&mddev->active_io); md_clone_bio(mddev, bio); } EXPORT_SYMBOL_GPL(md_account_bio); void md_free_cloned_bio(struct bio *bio) { struct md_io_clone *md_io_clone = bio->bi_private; struct bio *orig_bio = md_io_clone->orig_bio; struct mddev *mddev = md_io_clone->mddev; if (bio->bi_status && !orig_bio->bi_status) orig_bio->bi_status = bio->bi_status; if (md_io_clone->start_time) bio_end_io_acct(orig_bio, md_io_clone->start_time); bio_put(bio); percpu_ref_put(&mddev->active_io); } EXPORT_SYMBOL_GPL(md_free_cloned_bio); /* md_allow_write(mddev) * Calling this ensures that the array is marked 'active' so that writes * may proceed without blocking. It is important to call this before * attempting a GFP_KERNEL allocation while holding the mddev lock. * Must be called with mddev_lock held. */ void md_allow_write(struct mddev *mddev) { if (!mddev->pers) return; if (!md_is_rdwr(mddev)) return; if (!mddev->pers->sync_request) return; spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); if (mddev->safemode_delay && mddev->safemode == 0) mddev->safemode = 1; spin_unlock(&mddev->lock); md_update_sb(mddev, 0); sysfs_notify_dirent_safe(mddev->sysfs_state); /* wa