// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/mac80211/cfg.c
 */

#include <net/rtnetlink.h>
#include <net/cfg802154.h>

#include "ieee802154_i.h"
#include "driver-ops.h"
#include "cfg.h"

static struct net_device *
ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy, const char *name,
				unsigned char name_assign_type, int type)
{
	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
	struct net_device *dev;

	rtnl_lock();
	dev = ieee802154_if_add(local, name, name_assign_type, type,
				cpu_to_le64(0x0000000000000000ULL));
	rtnl_unlock();

	return dev;
}

static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
					    struct net_device *dev)
{
	struct
ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); ieee802154_if_remove(sdata); } #ifdef CONFIG_PM static int ieee802154_suspend(struct wpan_phy *wpan_phy) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); if (!local->open_count) goto suspend; ieee802154_sync_and_hold_queue(local); synchronize_net(); /* stop hardware - this must stop RX */ ieee802154_stop_device(local); suspend: local->suspended = true; return 0; } static int ieee802154_resume(struct wpan_phy *wpan_phy) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); int ret; /* nothing to do if HW shouldn't run */ if (!local->open_count) goto wake_up; /* restart hardware */ ret = drv_start(local, local->phy->filtering, &local->addr_filt); if (ret) return ret; wake_up: ieee802154_release_queue(local); local->suspended = false; return 0; } #else #define ieee802154_suspend NULL #define ieee802154_resume NULL #endif static int ieee802154_add_iface(struct wpan_phy *phy, const char *name, unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr) { struct ieee802154_local *local = wpan_phy_priv(phy); struct net_device *err; err = ieee802154_if_add(local, name, name_assign_type, type, extended_addr); return PTR_ERR_OR_ZERO(err); } static int ieee802154_del_iface(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { ieee802154_if_remove(IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev)); return 0; } static int ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); int ret; ASSERT_RTNL(); if (wpan_phy->current_page == page && wpan_phy->current_channel == channel) return 0; /* Refuse to change channels during scanning or beaconing */ if (mac802154_is_scanning(local) || mac802154_is_beaconing(local)) return -EBUSY; ret = drv_set_channel(local, page, channel); if (!ret) { wpan_phy->current_page = page; wpan_phy->current_channel = channel; ieee802154_configure_durations(wpan_phy, page, channel); } return ret; } static int ieee802154_set_cca_mode(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); int ret; ASSERT_RTNL(); if (wpan_phy_cca_cmp(&wpan_phy->cca, cca)) return 0; ret = drv_set_cca_mode(local, cca); if (!ret) wpan_phy->cca = *cca; return ret; } static int ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); int ret; ASSERT_RTNL(); if (wpan_phy->cca_ed_level == ed_level) return 0; ret = drv_set_cca_ed_level(local, ed_level); if (!ret) wpan_phy->cca_ed_level = ed_level; return ret; } static int ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); int ret; ASSERT_RTNL(); if (wpan_phy->transmit_power == power) return 0; ret = drv_set_tx_power(local, power); if (!ret) wpan_phy->transmit_power = power; return ret; } static int ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id) { int ret; ASSERT_RTNL(); if (wpan_dev->pan_id == pan_id) return 0; ret = mac802154_wpan_update_llsec(wpan_dev->netdev); if (!ret) wpan_dev->pan_id = pan_id; return ret; } static int ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 min_be, u8 max_be) { ASSERT_RTNL(); wpan_dev->min_be = min_be; wpan_dev->max_be = max_be; return 0; } static int ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 short_addr) { ASSERT_RTNL(); 
wpan_dev->short_addr = short_addr; return 0; } static int ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, u8 max_csma_backoffs) { ASSERT_RTNL(); wpan_dev->csma_retries = max_csma_backoffs; return 0; } static int ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, s8 max_frame_retries) { ASSERT_RTNL(); wpan_dev->frame_retries = max_frame_retries; return 0; } static int ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool mode) { ASSERT_RTNL(); wpan_dev->lbt = mode; return 0; } static int ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, bool ackreq) { ASSERT_RTNL(); wpan_dev->ackreq = ackreq; return 0; } static int mac802154_trigger_scan(struct wpan_phy *wpan_phy, struct cfg802154_scan_request *request) { struct ieee802154_sub_if_data *sdata; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev); ASSERT_RTNL(); return mac802154_trigger_scan_locked(sdata, request); } static int mac802154_abort_scan(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); struct ieee802154_sub_if_data *sdata; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev); ASSERT_RTNL(); return mac802154_abort_scan_locked(local, sdata); } static int mac802154_send_beacons(struct wpan_phy *wpan_phy, struct cfg802154_beacon_request *request) { struct ieee802154_sub_if_data *sdata; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(request->wpan_dev); ASSERT_RTNL(); return mac802154_send_beacons_locked(sdata, request); } static int mac802154_stop_beacons(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); struct ieee802154_sub_if_data *sdata; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev); ASSERT_RTNL(); return mac802154_stop_beacons_locked(local, sdata); } static int mac802154_associate(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_addr *coord) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); u64 ceaddr = swab64((__force u64)coord->extended_addr); struct ieee802154_sub_if_data *sdata; struct ieee802154_pan_device *parent; __le16 short_addr; int ret; ASSERT_RTNL(); sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev); if (wpan_dev->parent) { dev_err(&sdata->dev->dev, "Device %8phC is already associated\n", &ceaddr); return -EPERM; } if (coord->mode == IEEE802154_SHORT_ADDRESSING) return -EINVAL; parent = kzalloc(sizeof(*parent), GFP_KERNEL); if (!parent) return -ENOMEM; parent->pan_id = coord->pan_id; parent->mode = coord->mode; parent->extended_addr = coord->extended_addr; parent->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST); /* Set the PAN ID hardware address filter beforehand to avoid dropping * the association response with a destination PAN ID field set to the * "new" PAN ID. 
*/ if (local->hw.flags & IEEE802154_HW_AFILT) { ret = drv_set_pan_id(local, coord->pan_id); if (ret < 0) goto free_parent; } ret = mac802154_perform_association(sdata, parent, &short_addr); if (ret) goto reset_panid; if (local->hw.flags & IEEE802154_HW_AFILT) { ret = drv_set_short_addr(local, short_addr); if (ret < 0) goto reset_panid; } wpan_dev->pan_id = coord->pan_id; wpan_dev->short_addr = short_addr; wpan_dev->parent = parent; return 0; reset_panid: if (local->hw.flags & IEEE802154_HW_AFILT) drv_set_pan_id(local, cpu_to_le16(IEEE802154_PAN_ID_BROADCAST)); free_parent: kfree(parent); return ret; } static int mac802154_disassociate_from_parent(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { struct ieee802154_local *local = wpan_phy_priv(wpan_phy); struct ieee802154_pan_device *child, *tmp; struct ieee802154_sub_if_data *sdata; unsigned int max_assoc; u64 eaddr; int ret; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev); /* Start by disassociating all the children and preventing new ones to * attempt associations. */ max_assoc = cfg802154_set_max_associations(wpan_dev, 0); list_for_each_entry_safe(child, tmp, &wpan_dev->children, node) { ret = mac802154_send_disassociation_notif(sdata, child, IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE); if (ret) { eaddr = swab64((__force u64)child->extended_addr); dev_err(&sdata->dev->dev, "Disassociation with %8phC may have failed (%d)\n", &eaddr, ret); } list_del(&child->node); } ret = mac802154_send_disassociation_notif(sdata, wpan_dev->parent, IEEE802154_DEVICE_WISHES_TO_LEAVE); if (ret) { eaddr = swab64((__force u64)wpan_dev->parent->extended_addr); dev_err(&sdata->dev->dev, "Disassociation from %8phC may have failed (%d)\n", &eaddr, ret); } ret = 0; kfree(wpan_dev->parent); wpan_dev->parent = NULL; wpan_dev->pan_id = cpu_to_le16(IEEE802154_PAN_ID_BROADCAST); wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST); if (local->hw.flags & IEEE802154_HW_AFILT) { ret = drv_set_pan_id(local, wpan_dev->pan_id); if (ret < 0) goto reset_mac_assoc; ret = drv_set_short_addr(local, wpan_dev->short_addr); if (ret < 0) goto reset_mac_assoc; } reset_mac_assoc: cfg802154_set_max_associations(wpan_dev, max_assoc); return ret; } static int mac802154_disassociate_child(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_pan_device *child) { struct ieee802154_sub_if_data *sdata; int ret; sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(wpan_dev); ret = mac802154_send_disassociation_notif(sdata, child, IEEE802154_COORD_WISHES_DEVICE_TO_LEAVE); if (ret) return ret; list_del(&child->node); wpan_dev->nchildren--; kfree(child); return 0; } static int mac802154_disassociate(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_addr *target) { u64 teaddr = swab64((__force u64)target->extended_addr); struct ieee802154_pan_device *pan_device; ASSERT_RTNL(); if (cfg802154_device_is_parent(wpan_dev, target)) return mac802154_disassociate_from_parent(wpan_phy, wpan_dev); pan_device = cfg802154_device_is_child(wpan_dev, target); if (pan_device) return mac802154_disassociate_child(wpan_phy, wpan_dev, pan_device); dev_err(&wpan_dev->netdev->dev, "Device %8phC is not associated with us\n", &teaddr); return -EINVAL; } #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL static void ieee802154_get_llsec_table(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_llsec_table **table) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); *table = &sdata->sec.table; } static 
void ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); mutex_lock(&sdata->sec_mtx); } static void ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); mutex_unlock(&sdata->sec_mtx); } static int ieee802154_set_llsec_params(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_params *params, int changed) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_set_params(&sdata->sec, params, changed); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_get_llsec_params(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, struct ieee802154_llsec_params *params) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_get_params(&sdata->sec, params); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_key_id *id, const struct ieee802154_llsec_key *key) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_key_add(&sdata->sec, id, key); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_key_id *id) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_key_del(&sdata->sec, id); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_seclevel *sl) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_seclevel_add(&sdata->sec, sl); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_seclevel *sl) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_seclevel_del(&sdata->sec, sl); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, const struct ieee802154_llsec_device *dev_desc) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_dev_add(&sdata->sec, dev_desc); mutex_unlock(&sdata->sec_mtx); return res; } static int ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le64 extended_addr) { struct net_device *dev = wpan_dev->netdev; struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); int res; mutex_lock(&sdata->sec_mtx); res = mac802154_llsec_dev_del(&sdata->sec, extended_addr); 
	mutex_unlock(&sdata->sec_mtx);

	return res;
}

static int
ieee802154_add_devkey(struct wpan_phy *wpan_phy,
		      struct wpan_dev *wpan_dev,
		      __le64 extended_addr,
		      const struct ieee802154_llsec_device_key *key)
{
	struct net_device *dev = wpan_dev->netdev;
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int res;

	mutex_lock(&sdata->sec_mtx);
	res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key);
	mutex_unlock(&sdata->sec_mtx);

	return res;
}

static int
ieee802154_del_devkey(struct wpan_phy *wpan_phy,
		      struct wpan_dev *wpan_dev,
		      __le64 extended_addr,
		      const struct ieee802154_llsec_device_key *key)
{
	struct net_device *dev = wpan_dev->netdev;
	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
	int res;

	mutex_lock(&sdata->sec_mtx);
	res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key);
	mutex_unlock(&sdata->sec_mtx);

	return res;
}
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */

const struct cfg802154_ops mac802154_config_ops = {
	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
	.suspend = ieee802154_suspend,
	.resume = ieee802154_resume,
	.add_virtual_intf = ieee802154_add_iface,
	.del_virtual_intf = ieee802154_del_iface,
	.set_channel = ieee802154_set_channel,
	.set_cca_mode = ieee802154_set_cca_mode,
	.set_cca_ed_level = ieee802154_set_cca_ed_level,
	.set_tx_power = ieee802154_set_tx_power,
	.set_pan_id = ieee802154_set_pan_id,
	.set_short_addr = ieee802154_set_short_addr,
	.set_backoff_exponent = ieee802154_set_backoff_exponent,
	.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
	.set_max_frame_retries = ieee802154_set_max_frame_retries,
	.set_lbt_mode = ieee802154_set_lbt_mode,
	.set_ackreq_default = ieee802154_set_ackreq_default,
	.trigger_scan = mac802154_trigger_scan,
	.abort_scan = mac802154_abort_scan,
	.send_beacons = mac802154_send_beacons,
	.stop_beacons = mac802154_stop_beacons,
	.associate = mac802154_associate,
	.disassociate = mac802154_disassociate,
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
	.get_llsec_table = ieee802154_get_llsec_table,
	.lock_llsec_table = ieee802154_lock_llsec_table,
	.unlock_llsec_table = ieee802154_unlock_llsec_table,
	/* TODO above */
	.set_llsec_params = ieee802154_set_llsec_params,
	.get_llsec_params = ieee802154_get_llsec_params,
	.add_llsec_key = ieee802154_add_llsec_key,
	.del_llsec_key = ieee802154_del_llsec_key,
	.add_seclevel = ieee802154_add_seclevel,
	.del_seclevel = ieee802154_del_seclevel,
	.add_device = ieee802154_add_device,
	.del_device = ieee802154_del_device,
	.add_devkey = ieee802154_add_devkey,
	.del_devkey = ieee802154_del_devkey,
#endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
};
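/*
 * Hedged illustration, not part of mac802154: a standalone sketch of the
 * "skip if unchanged, try the driver, commit cached state only on success"
 * pattern that ieee802154_set_channel() above follows.  All fake_* names
 * are hypothetical stand-ins introduced only for this example.
 */
#include <stdio.h>

struct fake_phy {
	int page;
	int channel;
};

/* stand-in for drv_set_channel(); accepts any non-negative setting */
static int fake_drv_set_channel(struct fake_phy *phy, int page, int channel)
{
	(void)phy;
	return (page >= 0 && channel >= 0) ? 0 : -1;
}

static int fake_set_channel(struct fake_phy *phy, int page, int channel)
{
	int ret;

	/* nothing to do if the requested setting is already active */
	if (phy->page == page && phy->channel == channel)
		return 0;

	ret = fake_drv_set_channel(phy, page, channel);
	if (!ret) {
		/* commit the cached state only after the "hardware" agreed */
		phy->page = page;
		phy->channel = channel;
	}

	return ret;
}

int main(void)
{
	struct fake_phy phy = { .page = 0, .channel = 11 };

	printf("ret=%d, channel=%d\n",
	       fake_set_channel(&phy, 0, 15), phy.channel);
	return 0;
}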
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

#include <linux/compiler.h>		/* For unlikely. */
#include <linux/sched.h>		/* For struct task_struct. */
#include <linux/sched/signal.h>		/* For send_sig(), same_thread_group(), etc. */
#include <linux/err.h>			/* for IS_ERR_VALUE */
#include <linux/bug.h>			/* For BUG_ON. */
#include <linux/pid_namespace.h>	/* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
#include <linux/seccomp.h>

/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */
struct syscall_info {
	__u64			sp;
	struct seccomp_data	data;
};

extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
			    void *buf, int len, unsigned int gup_flags);

/*
 * Ptrace flags
 *
 * The ownership rules for task->ptrace, which holds the ptrace flags,
 * are simple.  When a task is running it owns its task->ptrace
 * flags.  When a task is stopped the ptracer owns task->ptrace.
*/ #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ #define PT_PTRACED 0x00000001 #define PT_OPT_FLAG_SHIFT 3 /* PT_TRACE_* event enable flags */ #define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event))) #define PT_TRACESYSGOOD PT_EVENT_FLAG(0) #define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK) #define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK) #define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE) #define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC) #define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE) #define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT) #define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP) #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) extern long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); extern void ptrace_disable(struct task_struct *); extern int ptrace_request(struct task_struct *child, long request, unsigned long addr, unsigned long data); extern int ptrace_notify(int exit_code, unsigned long message); extern void __ptrace_link(struct task_struct *child, struct task_struct *new_parent, const struct cred *ptracer_cred); extern void __ptrace_unlink(struct task_struct *child); extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_READ 0x01 #define PTRACE_MODE_ATTACH 0x02 #define PTRACE_MODE_NOAUDIT 0x04 #define PTRACE_MODE_FSCREDS 0x08 #define PTRACE_MODE_REALCREDS 0x10 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) /** * ptrace_may_access - check whether the caller is permitted to access * a target task. * @task: target task * @mode: selects type of access and caller credentials * * Returns true on success, false on denial. * * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must * be set in @mode to specify whether the access was requested through * a filesystem syscall (should use effective capabilities and fsuid * of the caller) or through an explicit syscall such as * process_vm_writev or ptrace (and should use the real credentials). */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); } static inline void ptrace_unlink(struct task_struct *child) { if (unlikely(child->ptrace)) __ptrace_unlink(child); } int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, unsigned long data); int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data); /** * ptrace_parent - return the task that is tracing the given task * @task: task to consider * * Returns %NULL if no one is tracing @task, or the &struct task_struct * pointer to its tracer. * * Must called under rcu_read_lock(). The pointer returned might be kept * live only by RCU. 
During exec, this may be called with task_lock() held * on @task, still held from when check_unsafe_exec() was called. */ static inline struct task_struct *ptrace_parent(struct task_struct *task) { if (unlikely(task->ptrace)) return rcu_dereference(task->parent); return NULL; } /** * ptrace_event_enabled - test whether a ptrace event is enabled * @task: ptracee of interest * @event: %PTRACE_EVENT_* to test * * Test whether @event is enabled for ptracee @task. * * Returns %true if @event is enabled, %false otherwise. */ static inline bool ptrace_event_enabled(struct task_struct *task, int event) { return task->ptrace & PT_EVENT_FLAG(event); } /** * ptrace_event - possibly stop for a ptrace event notification * @event: %PTRACE_EVENT_* value to report * @message: value for %PTRACE_GETEVENTMSG to return * * Check whether @event is enabled and, if so, report @event and @message * to the ptrace parent. * * Called without locks. */ static inline void ptrace_event(int event, unsigned long message) { if (unlikely(ptrace_event_enabled(current, event))) { ptrace_notify((event << 8) | SIGTRAP, message); } else if (event == PTRACE_EVENT_EXEC) { /* legacy EXEC report via SIGTRAP */ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) send_sig(SIGTRAP, current, 0); } } /** * ptrace_event_pid - possibly stop for a ptrace event notification * @event: %PTRACE_EVENT_* value to report * @pid: process identifier for %PTRACE_GETEVENTMSG to return * * Check whether @event is enabled and, if so, report @event and @pid * to the ptrace parent. @pid is reported as the pid_t seen from the * ptrace parent's pid namespace. * * Called without locks. */ static inline void ptrace_event_pid(int event, struct pid *pid) { /* * FIXME: There's a potential race if a ptracer in a different pid * namespace than parent attaches between computing message below and * when we acquire tasklist_lock in ptrace_stop(). If this happens, * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. */ unsigned long message = 0; struct pid_namespace *ns; rcu_read_lock(); ns = task_active_pid_ns(rcu_dereference(current->parent)); if (ns) message = pid_nr_ns(pid, ns); rcu_read_unlock(); ptrace_event(event, message); } /** * ptrace_init_task - initialize ptrace state for a new child * @child: new child task * @ptrace: true if child should be ptrace'd by parent's tracer * * This is called immediately after adding @child to its parent's children * list. @ptrace is false in the normal case, and true to ptrace @child. * * Called with current's siglock and write_lock_irq(&tasklist_lock) held. */ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) { INIT_LIST_HEAD(&child->ptrace_entry); INIT_LIST_HEAD(&child->ptraced); child->jobctl = 0; child->ptrace = 0; child->parent = child->real_parent; if (unlikely(ptrace) && current->ptrace) { child->ptrace = current->ptrace; __ptrace_link(child, current->parent, current->ptracer_cred); if (child->ptrace & PT_SEIZED) task_set_jobctl_pending(child, JOBCTL_TRAP_STOP); else sigaddset(&child->pending.signal, SIGSTOP); } else child->ptracer_cred = NULL; } /** * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped * @task: task in %EXIT_DEAD state * * Called with write_lock(&tasklist_lock) held. 
*/ static inline void ptrace_release_task(struct task_struct *task) { BUG_ON(!list_empty(&task->ptraced)); ptrace_unlink(task); BUG_ON(!list_empty(&task->ptrace_entry)); } #ifndef force_successful_syscall_return /* * System call handlers that, upon successful completion, need to return a * negative value should call force_successful_syscall_return() right before * returning. On architectures where the syscall convention provides for a * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly * others), this macro can be used to ensure that the error flag will not get * set. On architectures which do not support a separate error flag, the macro * is a no-op and the spurious error condition needs to be filtered out by some * other means (e.g., in user-level, by passing an extra argument to the * syscall handler, or something along those lines). */ #define force_successful_syscall_return() do { } while (0) #endif #ifndef is_syscall_success /* * On most systems we can tell if a syscall is a success based on if the retval * is an error value. On some systems like ia64 and powerpc they have different * indicators of success/failure and must define their own. */ #define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs)))) #endif /* * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__. * * These do-nothing inlines are used when the arch does not * implement single-step. The kerneldoc comments are here * to document the interface for all arch definitions. */ #ifndef arch_has_single_step /** * arch_has_single_step - does this CPU support user-mode single-step? * * If this is defined, then there must be function declarations or * inlines for user_enable_single_step() and user_disable_single_step(). * arch_has_single_step() should evaluate to nonzero iff the machine * supports instruction single-step for user mode. * It can be a constant or it can test a CPU feature bit. */ #define arch_has_single_step() (0) /** * user_enable_single_step - single-step in user-mode task * @task: either current or a task stopped in %TASK_TRACED * * This can only be called when arch_has_single_step() has returned nonzero. * Set @task so that when it returns to user mode, it will trap after the * next single instruction executes. If arch_has_block_step() is defined, * this must clear the effects of user_enable_block_step() too. */ static inline void user_enable_single_step(struct task_struct *task) { BUG(); /* This can never be called. */ } /** * user_disable_single_step - cancel user-mode single-step * @task: either current or a task stopped in %TASK_TRACED * * Clear @task of the effects of user_enable_single_step() and * user_enable_block_step(). This can be called whether or not either * of those was ever called on @task, and even if arch_has_single_step() * returned zero. */ static inline void user_disable_single_step(struct task_struct *task) { } #else extern void user_enable_single_step(struct task_struct *); extern void user_disable_single_step(struct task_struct *); #endif /* arch_has_single_step */ #ifndef arch_has_block_step /** * arch_has_block_step - does this CPU support user-mode block-step? * * If this is defined, then there must be a function declaration or inline * for user_enable_block_step(), and arch_has_single_step() must be defined * too. arch_has_block_step() should evaluate to nonzero iff the machine * supports step-until-branch for user mode. It can be a constant or it * can test a CPU feature bit. 
*/ #define arch_has_block_step() (0) /** * user_enable_block_step - step until branch in user-mode task * @task: either current or a task stopped in %TASK_TRACED * * This can only be called when arch_has_block_step() has returned nonzero, * and will never be called when single-instruction stepping is being used. * Set @task so that when it returns to user mode, it will trap after the * next branch or trap taken. */ static inline void user_enable_block_step(struct task_struct *task) { BUG(); /* This can never be called. */ } #else extern void user_enable_block_step(struct task_struct *); #endif /* arch_has_block_step */ #ifdef ARCH_HAS_USER_SINGLE_STEP_REPORT extern void user_single_step_report(struct pt_regs *regs); #else static inline void user_single_step_report(struct pt_regs *regs) { kernel_siginfo_t info; clear_siginfo(&info); info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = 0; info.si_uid = 0; force_sig_info(&info); } #endif #ifndef arch_ptrace_stop_needed /** * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called * * This is called with the siglock held, to decide whether or not it's * necessary to release the siglock and call arch_ptrace_stop(). It can be * defined to a constant if arch_ptrace_stop() is never required, or always * is. On machines where this makes sense, it should be defined to a quick * test to optimize out calling arch_ptrace_stop() when it would be * superfluous. For example, if the thread has not been back to user mode * since the last stop, the thread state might indicate that nothing needs * to be done. * * This is guaranteed to be invoked once before a task stops for ptrace and * may include arch-specific operations necessary prior to a ptrace stop. */ #define arch_ptrace_stop_needed() (0) #endif #ifndef arch_ptrace_stop /** * arch_ptrace_stop - Do machine-specific work before stopping for ptrace * * This is called with no locks held when arch_ptrace_stop_needed() has * just returned nonzero. It is allowed to block, e.g. for user memory * access. The arch can have machine-specific work to be done before * ptrace stops. On ia64, register backing store gets written back to user * memory here. Since this can be costly (requires dropping the siglock), * we only do it when the arch requires it for this particular stop, as * indicated by arch_ptrace_stop_needed(). */ #define arch_ptrace_stop() do { } while (0) #endif #ifndef current_pt_regs #define current_pt_regs() task_pt_regs(current) #endif #ifndef current_user_stack_pointer #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) #endif #ifndef exception_ip #define exception_ip(x) instruction_pointer(x) #endif extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); /* * ptrace report for syscall entry and exit looks identical. */ static inline int ptrace_report_syscall(unsigned long message) { int ptrace = current->ptrace; int signr; if (!(ptrace & PT_PTRACED)) return 0; signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0), message); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. 
	 *  -brl
	 */
	if (signr)
		send_sig(signr, current, 1);

	return fatal_signal_pending(current);
}

/**
 * ptrace_report_syscall_entry - task is about to attempt a system call
 * @regs: user register state of current task
 *
 * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
 * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
 * entered the kernel for a system call.  Full user register state is
 * available here.  Changing the values in @regs can affect the system
 * call number and arguments to be tried.  It is safe to block here,
 * preventing the system call from beginning.
 *
 * Returns zero normally, or nonzero if the calling arch code should abort
 * the system call.  That must prevent normal entry so no system call is
 * made.  If @task ever returns to user mode after this, its register state
 * is unspecified, but should be something harmless like an %ENOSYS error
 * return.  It should preserve enough information so that syscall_rollback()
 * can work (see asm-generic/syscall.h).
 *
 * Called without locks, just after entering kernel mode.
 */
static inline __must_check int ptrace_report_syscall_entry(
	struct pt_regs *regs)
{
	return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
}

/**
 * ptrace_report_syscall_exit - task has just finished a system call
 * @regs: user register state of current task
 * @step: nonzero if simulating single-step or block-step
 *
 * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
 * the current task has just finished an attempted system call.  Full
 * user register state is available here.  It is safe to block here,
 * preventing signals from being processed.
 *
 * If @step is nonzero, this report is also in lieu of the normal
 * trap that would follow the system call instruction because
 * user_enable_block_step() or user_enable_single_step() was used.
 * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
 *
 * Called without locks, just before checking for pending signals.
 */
static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
{
	if (step)
		user_single_step_report(regs);
	else
		ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
}
#endif
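/*
 * Hedged illustration, not part of <linux/ptrace.h>: a standalone sketch of
 * how the PT_EVENT_FLAG() mapping above turns a PTRACE_EVENT_* number into
 * a bit in task->ptrace.  The macro is re-declared locally under an X_
 * prefix, and the event number 4 is just an arbitrary example value.
 */
#include <stdio.h>

#define X_PT_OPT_FLAG_SHIFT	3
#define X_PT_EVENT_FLAG(event)	(1 << (X_PT_OPT_FLAG_SHIFT + (event)))

int main(void)
{
	unsigned int ptrace_flags = 0;
	int event = 4;	/* arbitrary example event number */

	/* enabling the event sets its dedicated flag bit */
	ptrace_flags |= X_PT_EVENT_FLAG(event);

	printf("flag for event %d = 0x%x, enabled = %d\n", event,
	       (unsigned int)X_PT_EVENT_FLAG(event),
	       !!(ptrace_flags & X_PT_EVENT_FLAG(event)));
	return 0;
}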
// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include
<linux/pm_wakeup.h> #include <linux/atomic.h> #include <linux/jiffies.h> #include "power.h" /* * control - Report/change current runtime PM setting of the device * * Runtime power management of a device can be blocked with the help of * this attribute. All devices have one of the following two values for * the power/control file: * * + "auto\n" to allow the device to be power managed at run time; * + "on\n" to prevent the device from being power managed at run time; * * The default for all devices is "auto", which means that devices may be * subject to automatic power management, depending on their drivers. * Changing this attribute to "on" prevents the driver from power managing * the device at run time. Doing that while the device is suspended causes * it to be woken up. * * wakeup - Report/change current wakeup option for device * * Some devices support "wakeup" events, which are hardware signals * used to activate devices from suspended or low power states. Such * devices have one of three values for the sysfs power/wakeup file: * * + "enabled\n" to issue the events; * + "disabled\n" not to do so; or * + "\n" for temporary or permanent inability to issue wakeup. * * (For example, unconfigured USB devices can't issue wakeups.) * * Familiar examples of devices that can issue wakeup events include * keyboards and mice (both PS2 and USB styles), power buttons, modems, * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events * will wake the entire system from a suspend state; others may just * wake up the device (if the system as a whole is already active). * Some wakeup events use normal IRQ lines; other use special out * of band signaling. * * It is the responsibility of device drivers to enable (or disable) * wakeup signaling as part of changing device power states, respecting * the policy choices provided through the driver model. * * Devices may not be able to generate wakeup events from all power * states. Also, the events may be ignored in some configurations; * for example, they might need help from other devices that aren't * active, or which may have wakeup disabled. Some drivers rely on * wakeup events internally (unless they are disabled), keeping * their hardware in low power modes whenever they're unused. This * saves runtime power, without requiring system-wide sleep states. * * async - Report/change current async suspend setting for the device * * Asynchronous suspend and resume of the device during system-wide power * state transitions can be enabled by writing "enabled" to this file. * Analogously, if "disabled" is written to this file, the device will be * suspended and resumed synchronously. * * All devices have one of the following two values for power/async: * * + "enabled\n" to permit the asynchronous suspend/resume of the device; * + "disabled\n" to forbid it; * * NOTE: It generally is unsafe to permit the asynchronous suspend/resume * of a device unless it is certain that all of the PM dependencies of the * device are known to the PM core. However, for some devices this * attribute is set to "enabled" by bus type code or device drivers and in * that cases it should be safe to leave the default value. * * autosuspend_delay_ms - Report/change a device's autosuspend_delay value * * Some drivers don't want to carry out a runtime suspend as soon as a * device becomes idle; they want it always to remain idle for some period * of time before suspending it. 
This period is the autosuspend_delay * value (expressed in milliseconds) and it can be controlled by the user. * If the value is negative then the device will never be runtime * suspended. * * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay * value are used only if the driver calls pm_runtime_use_autosuspend(). * * wakeup_count - Report the number of wakeup events related to the device */ const char power_group_name[] = "power"; EXPORT_SYMBOL_GPL(power_group_name); static const char ctrl_auto[] = "auto"; static const char ctrl_on[] = "on"; static ssize_t control_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", dev->power.runtime_auto ? ctrl_auto : ctrl_on); } static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { device_lock(dev); if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); else if (sysfs_streq(buf, ctrl_on)) pm_runtime_forbid(dev); else n = -EINVAL; device_unlock(dev); return n; } static DEVICE_ATTR_RW(control); static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { u64 tmp = pm_runtime_active_time(dev); do_div(tmp, NSEC_PER_MSEC); return sysfs_emit(buf, "%llu\n", tmp); } static DEVICE_ATTR_RO(runtime_active_time); static ssize_t runtime_suspended_time_show(struct device *dev, struct device_attribute *attr, char *buf) { u64 tmp = pm_runtime_suspended_time(dev); do_div(tmp, NSEC_PER_MSEC); return sysfs_emit(buf, "%llu\n", tmp); } static DEVICE_ATTR_RO(runtime_suspended_time); static ssize_t runtime_status_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *output; if (dev->power.runtime_error) { output = "error"; } else if (dev->power.disable_depth) { output = "unsupported"; } else { switch (dev->power.runtime_status) { case RPM_SUSPENDED: output = "suspended"; break; case RPM_SUSPENDING: output = "suspending"; break; case RPM_RESUMING: output = "resuming"; break; case RPM_ACTIVE: output = "active"; break; default: return -EIO; } } return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(runtime_status); static ssize_t autosuspend_delay_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!dev->power.use_autosuspend) return -EIO; return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay); } static ssize_t autosuspend_delay_ms_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { long delay; if (!dev->power.use_autosuspend) return -EIO; if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) return -EINVAL; device_lock(dev); pm_runtime_set_autosuspend_delay(dev, delay); device_unlock(dev); return n; } static DEVICE_ATTR_RW(autosuspend_delay_ms); static ssize_t pm_qos_resume_latency_us_show(struct device *dev, struct device_attribute *attr, char *buf) { s32 value = dev_pm_qos_requested_resume_latency(dev); if (value == 0) return sysfs_emit(buf, "n/a\n"); if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) value = 0; return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_resume_latency_us_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { s32 value; int ret; if (!kstrtos32(buf, 0, &value)) { /* * Prevent users from writing negative or "no constraint" values * directly. 
*/ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) return -EINVAL; if (value == 0) value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; } else if (sysfs_streq(buf, "n/a")) { value = 0; } else { return -EINVAL; } ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, value); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_resume_latency_us); static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, struct device_attribute *attr, char *buf) { s32 value = dev_pm_qos_get_user_latency_tolerance(dev); if (value < 0) return sysfs_emit(buf, "%s\n", "auto"); if (value == PM_QOS_LATENCY_ANY) return sysfs_emit(buf, "%s\n", "any"); return sysfs_emit(buf, "%d\n", value); } static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { s32 value; int ret; if (kstrtos32(buf, 0, &value) == 0) { /* Users can't write negative values directly */ if (value < 0) return -EINVAL; } else { if (sysfs_streq(buf, "auto")) value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (sysfs_streq(buf, "any")) value = PM_QOS_LATENCY_ANY; else return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) & PM_QOS_FLAG_NO_POWER_OFF)); } static ssize_t pm_qos_no_power_off_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { int ret; if (kstrtoint(buf, 0, &ret)) return -EINVAL; if (ret != 0 && ret != 1) return -EINVAL; ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); return ret < 0 ? ret : n; } static DEVICE_ATTR_RW(pm_qos_no_power_off); #ifdef CONFIG_PM_SLEEP static const char _enabled[] = "enabled"; static const char _disabled[] = "disabled"; static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", device_can_wakeup(dev) ? (device_may_wakeup(dev) ? 
_enabled : _disabled) : ""); } static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { if (!device_can_wakeup(dev)) return -EINVAL; if (sysfs_streq(buf, _enabled)) device_set_wakeup_enable(dev, 1); else if (sysfs_streq(buf, _disabled)) device_set_wakeup_enable(dev, 0); else return -EINVAL; return n; } static DEVICE_ATTR_RW(wakeup); static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->wakeup_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_count); static ssize_t wakeup_active_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->active_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_active_count); static ssize_t wakeup_abort_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->wakeup_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_abort_count); static ssize_t wakeup_expire_count_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long count; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { count = dev->power.wakeup->expire_count; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lu\n", count); } static DEVICE_ATTR_RO(wakeup_expire_count); static ssize_t wakeup_active_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned int active; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { active = dev->power.wakeup->active; enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%u\n", active); } static DEVICE_ATTR_RO(wakeup_active); static ssize_t wakeup_total_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->total_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_total_time_ms); static ssize_t wakeup_max_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->max_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_max_time_ms); static ssize_t wakeup_last_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = 
ktime_to_ms(dev->power.wakeup->last_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { if (dev->power.wakeup && dev->power.wakeup->dev) return device_change_owner(dev->power.wakeup->dev, kuid, kgid); return 0; } static DEVICE_ATTR_RO(wakeup_last_time_ms); #ifdef CONFIG_PM_AUTOSLEEP static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, struct device_attribute *attr, char *buf) { s64 msec; bool enabled = false; spin_lock_irq(&dev->power.lock); if (dev->power.wakeup) { msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); enabled = true; } spin_unlock_irq(&dev->power.lock); if (!enabled) return sysfs_emit(buf, "\n"); return sysfs_emit(buf, "%lld\n", msec); } static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); #endif /* CONFIG_PM_AUTOSLEEP */ #else /* CONFIG_PM_SLEEP */ static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { return 0; } #endif #ifdef CONFIG_PM_ADVANCED_DEBUG static ssize_t runtime_usage_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count)); } static DEVICE_ATTR_RO(runtime_usage); static ssize_t runtime_active_kids_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev->power.ignore_children ? 0 : atomic_read(&dev->power.child_count)); } static DEVICE_ATTR_RO(runtime_active_kids); static ssize_t runtime_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *output; if (dev->power.disable_depth && !dev->power.runtime_auto) output = "disabled & forbidden"; else if (dev->power.disable_depth) output = "disabled"; else if (!dev->power.runtime_auto) output = "forbidden"; else output = "enabled"; return sysfs_emit(buf, "%s\n", output); } static DEVICE_ATTR_RO(runtime_enabled); #ifdef CONFIG_PM_SLEEP static ssize_t async_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", device_async_suspend_enabled(dev) ? 
_enabled : _disabled); } static ssize_t async_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { if (sysfs_streq(buf, _enabled)) device_enable_async_suspend(dev); else if (sysfs_streq(buf, _disabled)) device_disable_async_suspend(dev); else return -EINVAL; return n; } static DEVICE_ATTR_RW(async); #endif /* CONFIG_PM_SLEEP */ #endif /* CONFIG_PM_ADVANCED_DEBUG */ static struct attribute *power_attrs[] = { #ifdef CONFIG_PM_ADVANCED_DEBUG #ifdef CONFIG_PM_SLEEP &dev_attr_async.attr, #endif &dev_attr_runtime_status.attr, &dev_attr_runtime_usage.attr, &dev_attr_runtime_active_kids.attr, &dev_attr_runtime_enabled.attr, #endif /* CONFIG_PM_ADVANCED_DEBUG */ NULL, }; static const struct attribute_group pm_attr_group = { .name = power_group_name, .attrs = power_attrs, }; static struct attribute *wakeup_attrs[] = { #ifdef CONFIG_PM_SLEEP &dev_attr_wakeup.attr, &dev_attr_wakeup_count.attr, &dev_attr_wakeup_active_count.attr, &dev_attr_wakeup_abort_count.attr, &dev_attr_wakeup_expire_count.attr, &dev_attr_wakeup_active.attr, &dev_attr_wakeup_total_time_ms.attr, &dev_attr_wakeup_max_time_ms.attr, &dev_attr_wakeup_last_time_ms.attr, #ifdef CONFIG_PM_AUTOSLEEP &dev_attr_wakeup_prevent_sleep_time_ms.attr, #endif #endif NULL, }; static const struct attribute_group pm_wakeup_attr_group = { .name = power_group_name, .attrs = wakeup_attrs, }; static struct attribute *runtime_attrs[] = { #ifndef CONFIG_PM_ADVANCED_DEBUG &dev_attr_runtime_status.attr, #endif &dev_attr_control.attr, &dev_attr_runtime_suspended_time.attr, &dev_attr_runtime_active_time.attr, &dev_attr_autosuspend_delay_ms.attr, NULL, }; static const struct attribute_group pm_runtime_attr_group = { .name = power_group_name, .attrs = runtime_attrs, }; static struct attribute *pm_qos_resume_latency_attrs[] = { &dev_attr_pm_qos_resume_latency_us.attr, NULL, }; static const struct attribute_group pm_qos_resume_latency_attr_group = { .name = power_group_name, .attrs = pm_qos_resume_latency_attrs, }; static struct attribute *pm_qos_latency_tolerance_attrs[] = { &dev_attr_pm_qos_latency_tolerance_us.attr, NULL, }; static const struct attribute_group pm_qos_latency_tolerance_attr_group = { .name = power_group_name, .attrs = pm_qos_latency_tolerance_attrs, }; static struct attribute *pm_qos_flags_attrs[] = { &dev_attr_pm_qos_no_power_off.attr, NULL, }; static const struct attribute_group pm_qos_flags_attr_group = { .name = power_group_name, .attrs = pm_qos_flags_attrs, }; int dpm_sysfs_add(struct device *dev) { int rc; /* No need to create PM sysfs if explicitly disabled. 
*/ if (device_pm_not_required(dev)) return 0; rc = sysfs_create_group(&dev->kobj, &pm_attr_group); if (rc) return rc; if (!pm_runtime_has_no_callbacks(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); if (rc) goto err_out; } if (device_can_wakeup(dev)) { rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); if (rc) goto err_runtime; } if (dev->power.set_latency_tolerance) { rc = sysfs_merge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); if (rc) goto err_wakeup; } rc = pm_wakeup_source_sysfs_add(dev); if (rc) goto err_latency; return 0; err_latency: sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); err_wakeup: sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); err_runtime: sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); err_out: sysfs_remove_group(&dev->kobj, &pm_attr_group); return rc; } int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) { int rc; if (device_pm_not_required(dev)) return 0; rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid); if (rc) return rc; if (!pm_runtime_has_no_callbacks(dev)) { rc = sysfs_group_change_owner( &dev->kobj, &pm_runtime_attr_group, kuid, kgid); if (rc) return rc; } if (device_can_wakeup(dev)) { rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group, kuid, kgid); if (rc) return rc; rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid); if (rc) return rc; } if (dev->power.set_latency_tolerance) { rc = sysfs_group_change_owner( &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid, kgid); if (rc) return rc; } return 0; } int wakeup_sysfs_add(struct device *dev) { int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); if (!ret) kobject_uevent(&dev->kobj, KOBJ_CHANGE); return ret; } void wakeup_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); kobject_uevent(&dev->kobj, KOBJ_CHANGE); } int pm_qos_sysfs_add_resume_latency(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } void pm_qos_sysfs_remove_resume_latency(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); } int pm_qos_sysfs_add_flags(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); } void pm_qos_sysfs_remove_flags(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } int pm_qos_sysfs_add_latency_tolerance(struct device *dev) { return sysfs_merge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); } void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); } void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); } void dpm_sysfs_remove(struct device *dev) { if (device_pm_not_required(dev)) return; sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); sysfs_remove_group(&dev->kobj, &pm_attr_group); } |
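/*
 * Example (not part of the file above): a minimal userspace sketch that reads
 * a few of the attributes created by the sysfs groups defined above.  The
 * device directory below is a hypothetical placeholder, and whether a given
 * attribute exists depends on the kernel configuration (CONFIG_PM_SLEEP,
 * CONFIG_PM_ADVANCED_DEBUG, ...) and on the device's capabilities.
 */
#if 0 /* illustrative userspace code, not part of the kernel build */
#include <stdio.h>

static void show_pm_attr(const char *dev_dir, const char *attr)
{
	char path[256], line[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/power/%s", dev_dir, attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-30s <not exported>\n", attr);
		return;
	}
	/* wakeup_* attributes print a bare newline when no wakeup source is set */
	if (fgets(line, sizeof(line), f))
		printf("%-30s %s", attr, line[0] == '\n' ? "<empty>\n" : line);
	fclose(f);
}

int main(void)
{
	/* hypothetical device directory; point this at a real device */
	const char *dev = "/sys/devices/platform/example-dev";

	show_pm_attr(dev, "runtime_status");
	show_pm_attr(dev, "wakeup_last_time_ms");
	show_pm_attr(dev, "wakeup_prevent_sleep_time_ms");
	return 0;
}
#endif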
1 1 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 | /* * linux/fs/nls/nls_iso8859-7.c * * Charset iso8859-7 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x02bd, 0x02bc, 0x00a3, 0x0000, 0x0000, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x0000, 0x00ab, 0x00ac, 0x00ad, 0x0000, 0x2015, /* 0xb0*/ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x0384, 0x0385, 0x0386, 0x00b7, 0x0388, 0x0389, 0x038a, 0x00bb, 0x038c, 0x00bd, 0x038e, 0x038f, /* 0xc0*/ 0x0390, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f, /* 0xd0*/ 0x03a0, 0x03a1, 0x0000, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7, 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03ae, 0x03af, /* 0xe0*/ 0x03b0, 0x03b1, 0x03b2, 
0x03b3, 0x03b4, 0x03b5, 0x03b6, 0x03b7, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bc, 0x03bd, 0x03be, 0x03bf, /* 0xf0*/ 0x03c0, 0x03c1, 0x03c2, 0x03c3, 0x03c4, 0x03c5, 0x03c6, 0x03c7, 0x03c8, 0x03c9, 0x03ca, 0x03cb, 0x03cc, 0x03cd, 0x03ce, 0x0000, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0x00, 0x00, 0xb7, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0xa2, 0xa1, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0x00, /* 0x80-0x87 */ 0xb8, 0xb9, 0xba, 0x00, 0xbc, 0x00, 0xbe, 0xbf, /* 0x88-0x8f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x90-0x97 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x98-0x9f */ 0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xa0-0xa7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xa8-0xaf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xb0-0xb7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xb8-0xbf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xc0-0xc7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xc8-0xcf */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, /* 0x10-0x17 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, page02, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 
0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xdc, 0xb7, /* 0xb0-0xb7 */ 0xdd, 0xde, 0xdf, 0xbb, 0xfc, 0xbd, 0xfd, 0xfe, /* 0xb8-0xbf */ 0xc0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xb6, 0xb8, 0xb9, 0xba, /* 0xd8-0xdf */ 0xe0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd3, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xbc, 0xbe, 0xbf, 0x00, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-7", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_iso8859_7(void) { return register_nls(&table); } static void __exit 
exit_nls_iso8859_7(void) { unregister_nls(&table); } module_init(init_nls_iso8859_7) module_exit(exit_nls_iso8859_7) MODULE_LICENSE("Dual BSD/GPL");
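/*
 * Illustration (not part of the file above): a self-contained sketch of the
 * two-level lookup that uni2char() performs.  The high byte of the Unicode
 * code point selects a 256-entry page, the low byte indexes into that page,
 * and a zero entry means "no mapping".  The tiny tables below are made-up
 * stand-ins for page00/page03 and exist only for this demo.
 */
#if 0 /* illustrative userspace code, not part of the kernel build */
#include <stdio.h>

static const unsigned char demo_page00[256] = { [0x41] = 0x41 };	/* 'A' */
static const unsigned char demo_page03[256] = { [0x91] = 0xc1 };	/* GREEK CAPITAL ALPHA */
static const unsigned char *const demo_pages[256] = {
	[0x00] = demo_page00,
	[0x03] = demo_page03,
};

static int demo_uni2char(unsigned int uni, unsigned char *out)
{
	const unsigned char *page = demo_pages[(uni >> 8) & 0xff];
	unsigned char c = page ? page[uni & 0xff] : 0;

	if (!c)
		return -1;	/* no mapping, like the -EINVAL return above */
	*out = c;
	return 1;
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x0391, &c) == 1)
		printf("U+0391 -> 0x%02x\n", c);	/* 0xc1, as in page03 above */
	if (demo_uni2char(0x20ac, &c) < 0)
		printf("U+20AC has no mapping in this demo\n");
	return 0;
}
#endif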
291 292 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 | // SPDX-License-Identifier: GPL-2.0 /* Lock down the kernel * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/security.h> #include <linux/export.h> #include <linux/lsm_hooks.h> #include <uapi/linux/lsm.h> static enum lockdown_reason kernel_locked_down; static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_CONFIDENTIALITY_MAX}; /* * Put the kernel into lock-down mode. */ static int lock_kernel_down(const char *where, enum lockdown_reason level) { if (kernel_locked_down >= level) return -EPERM; kernel_locked_down = level; pr_notice("Kernel is locked down from %s; see man kernel_lockdown.7\n", where); return 0; } static int __init lockdown_param(char *level) { if (!level) return -EINVAL; if (strcmp(level, "integrity") == 0) lock_kernel_down("command line", LOCKDOWN_INTEGRITY_MAX); else if (strcmp(level, "confidentiality") == 0) lock_kernel_down("command line", LOCKDOWN_CONFIDENTIALITY_MAX); else return -EINVAL; return 0; } early_param("lockdown", lockdown_param); /** * lockdown_is_locked_down - Find out if the kernel is locked down * @what: Tag to use in notice generated if lockdown is in effect */ static int lockdown_is_locked_down(enum lockdown_reason what) { if (WARN(what >= LOCKDOWN_CONFIDENTIALITY_MAX, "Invalid lockdown reason")) return -EPERM; if (kernel_locked_down >= what) { if (lockdown_reasons[what]) pr_notice_ratelimited("Lockdown: %s: %s is restricted; see man kernel_lockdown.7\n", current->comm, lockdown_reasons[what]); return -EPERM; } return 0; } static struct security_hook_list lockdown_hooks[] __ro_after_init = { LSM_HOOK_INIT(locked_down, lockdown_is_locked_down), }; const struct lsm_id lockdown_lsmid = { .name = "lockdown", .id = LSM_ID_LOCKDOWN, }; static int __init lockdown_lsm_init(void) { #if defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY) lock_kernel_down("Kernel configuration", LOCKDOWN_INTEGRITY_MAX); #elif defined(CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY) lock_kernel_down("Kernel configuration", LOCKDOWN_CONFIDENTIALITY_MAX); #endif security_add_hooks(lockdown_hooks, ARRAY_SIZE(lockdown_hooks), &lockdown_lsmid); return 0; } static ssize_t lockdown_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char temp[80]; int i, offset = 0; for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) { enum lockdown_reason level = lockdown_levels[i]; if (lockdown_reasons[level]) { const char *label = lockdown_reasons[level]; if (kernel_locked_down == level) offset += sprintf(temp+offset, "[%s] ", label); else offset += sprintf(temp+offset, "%s ", label); } } /* Convert the last space to a newline if needed. 
*/ if (offset > 0) temp[offset-1] = '\n'; return simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); } static ssize_t lockdown_write(struct file *file, const char __user *buf, size_t n, loff_t *ppos) { char *state; int i, len, err = -EINVAL; state = memdup_user_nul(buf, n); if (IS_ERR(state)) return PTR_ERR(state); len = strlen(state); if (len && state[len-1] == '\n') { state[len-1] = '\0'; len--; } for (i = 0; i < ARRAY_SIZE(lockdown_levels); i++) { enum lockdown_reason level = lockdown_levels[i]; const char *label = lockdown_reasons[level]; if (label && !strcmp(state, label)) err = lock_kernel_down("securityfs", level); } kfree(state); return err ? err : n; } static const struct file_operations lockdown_ops = { .read = lockdown_read, .write = lockdown_write, }; static int __init lockdown_secfs_init(void) { struct dentry *dentry; dentry = securityfs_create_file("lockdown", 0644, NULL, NULL, &lockdown_ops); return PTR_ERR_OR_ZERO(dentry); } core_initcall(lockdown_secfs_init); #ifdef CONFIG_SECURITY_LOCKDOWN_LSM_EARLY DEFINE_EARLY_LSM(lockdown) = { #else DEFINE_LSM(lockdown) = { #endif .name = "lockdown", .init = lockdown_lsm_init, }; |
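/*
 * Example (not part of the file above): a minimal userspace sketch that reads
 * the securityfs file created by lockdown_secfs_init() and, when given an
 * argument, tries to raise the level.  It assumes securityfs is mounted at the
 * conventional /sys/kernel/security path.  As lock_kernel_down() enforces, the
 * transition is one-way: writes only succeed for a level above the current one.
 */
#if 0 /* illustrative userspace code, not part of the kernel build */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/security/lockdown";
	char buf[128];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("open lockdown");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current: %s", buf);	/* e.g. "[none] integrity confidentiality" */
	fclose(f);

	if (argc > 1) {		/* e.g. "./lockdown-demo integrity" (needs root) */
		int ok;

		f = fopen(path, "w");
		if (!f) {
			perror("open lockdown for writing");
			return 1;
		}
		ok = fputs(argv[1], f) != EOF;
		ok = (fclose(f) == 0) && ok;	/* the write is flushed on fclose() */
		if (!ok) {
			fprintf(stderr, "raising the level to \"%s\" failed\n", argv[1]);
			return 1;
		}
	}
	return 0;
}
#endif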
1 1 1 1 1 1 1 1 126 129 1 129 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 | // SPDX-License-Identifier: GPL-2.0-only /* * Landlock LSM - Ptrace hooks * * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2019-2020 ANSSI */ #include <asm/current.h> #include <linux/cred.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/lsm_hooks.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include "common.h" #include "cred.h" #include "ptrace.h" #include "ruleset.h" #include "setup.h" /** * domain_scope_le - Checks domain ordering for scoped ptrace * * @parent: Parent domain. * @child: Potential child of @parent. * * Checks if the @parent domain is less or equal to (i.e. an ancestor, which * means a subset of) the @child domain. */ static bool domain_scope_le(const struct landlock_ruleset *const parent, const struct landlock_ruleset *const child) { const struct landlock_hierarchy *walker; if (!parent) return true; if (!child) return false; for (walker = child->hierarchy; walker; walker = walker->parent) { if (walker == parent->hierarchy) /* @parent is in the scoped hierarchy of @child. */ return true; } /* There is no relationship between @parent and @child. */ return false; } static bool task_is_scoped(const struct task_struct *const parent, const struct task_struct *const child) { bool is_scoped; const struct landlock_ruleset *dom_parent, *dom_child; rcu_read_lock(); dom_parent = landlock_get_task_domain(parent); dom_child = landlock_get_task_domain(child); is_scoped = domain_scope_le(dom_parent, dom_child); rcu_read_unlock(); return is_scoped; } static int task_ptrace(const struct task_struct *const parent, const struct task_struct *const child) { /* Quick return for non-landlocked tasks. */ if (!landlocked(parent)) return 0; if (task_is_scoped(parent, child)) return 0; return -EPERM; } /** * hook_ptrace_access_check - Determines whether the current process may access * another * * @child: Process to be accessed. * @mode: Mode of attachment. * * If the current task has Landlock rules, then the child must have at least * the same rules. Else denied. * * Determines whether a process may access another, returning 0 if permission * granted, -errno if denied. */ static int hook_ptrace_access_check(struct task_struct *const child, const unsigned int mode) { return task_ptrace(current, child); } /** * hook_ptrace_traceme - Determines whether another process may trace the * current one * * @parent: Task proposed to be the tracer. * * If the parent has Landlock rules, then the current task must have the same * or more rules. Else denied. * * Determines whether the nominated task is permitted to trace the current * process, returning 0 if permission is granted, -errno if denied. */ static int hook_ptrace_traceme(struct task_struct *const parent) { return task_ptrace(parent, current); } static struct security_hook_list landlock_hooks[] __ro_after_init = { LSM_HOOK_INIT(ptrace_access_check, hook_ptrace_access_check), LSM_HOOK_INIT(ptrace_traceme, hook_ptrace_traceme), }; __init void landlock_add_ptrace_hooks(void) { security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks), &landlock_lsmid); } |
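/*
 * Illustration (not part of the file above): the hierarchy walk performed by
 * domain_scope_le(), reduced to a standalone parent-pointer walk.  The struct
 * and helper names are made up for this demo, but the rule is the same: a
 * NULL parent domain always passes, a NULL child never does, and otherwise
 * the parent must appear somewhere in the child's ancestor chain.
 */
#if 0 /* illustrative userspace code, not part of the kernel build */
#include <stdbool.h>
#include <stdio.h>

struct demo_domain {
	const struct demo_domain *parent;
	const char *name;
};

static bool demo_scope_le(const struct demo_domain *parent,
			  const struct demo_domain *child)
{
	const struct demo_domain *walker;

	if (!parent)
		return true;
	if (!child)
		return false;
	for (walker = child; walker; walker = walker->parent)
		if (walker == parent)
			return true;
	return false;
}

int main(void)
{
	struct demo_domain root = { .parent = NULL, .name = "root" };
	struct demo_domain leaf = { .parent = &root, .name = "leaf" };

	/* an ancestor (subset) domain may trace its descendant, not vice versa */
	printf("%s traces %s: %d\n", root.name, leaf.name, demo_scope_le(&root, &leaf));	/* 1 */
	printf("%s traces %s: %d\n", leaf.name, root.name, demo_scope_le(&leaf, &root));	/* 0 */
	return 0;
}
#endif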
// SPDX-License-Identifier: GPL-2.0 /* * Block
multiqueue core code * * Copyright (C) 2013-2014 Jens Axboe * Copyright (C) 2013-2014 Christoph Hellwig */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/backing-dev.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include <linux/kmemleak.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/llist.h> #include <linux/cpu.h> #include <linux/cache.h> #include <linux/sched/sysctl.h> #include <linux/sched/topology.h> #include <linux/sched/signal.h> #include <linux/delay.h> #include <linux/crash_dump.h> #include <linux/prefetch.h> #include <linux/blk-crypto.h> #include <linux/part_stat.h> #include <trace/events/block.h> #include <linux/t10-pi.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-pm.h" #include "blk-stat.h" #include "blk-mq-sched.h" #include "blk-rq-qos.h" static DEFINE_PER_CPU(struct llist_head, blk_cpu_done); static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd); static void blk_mq_insert_request(struct request *rq, blk_insert_t flags); static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags); static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list); static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags); /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. */ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) { return !list_empty_careful(&hctx->dispatch) || sbitmap_any_bit_set(&hctx->ctx_map) || blk_mq_sched_has_work(hctx); } /* * Mark this ctx as having pending work in this hardware queue */ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { const int bit = ctx->index_hw[hctx->type]; if (!sbitmap_test_bit(&hctx->ctx_map, bit)) sbitmap_set_bit(&hctx->ctx_map, bit); } static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { const int bit = ctx->index_hw[hctx->type]; sbitmap_clear_bit(&hctx->ctx_map, bit); } struct mq_inflight { struct block_device *part; unsigned int inflight[2]; }; static bool blk_mq_check_inflight(struct request *rq, void *priv) { struct mq_inflight *mi = priv; if (rq->part && blk_do_io_stat(rq) && (!mi->part->bd_partno || rq->part == mi->part) && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) mi->inflight[rq_data_dir(rq)]++; return true; } unsigned int blk_mq_in_flight(struct request_queue *q, struct block_device *part) { struct mq_inflight mi = { .part = part }; blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); return mi.inflight[0] + mi.inflight[1]; } void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, unsigned int inflight[2]) { struct mq_inflight mi = { .part = part }; blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); inflight[0] = mi.inflight[0]; inflight[1] = mi.inflight[1]; } void blk_freeze_queue_start(struct request_queue *q) { mutex_lock(&q->mq_freeze_lock); if (++q->mq_freeze_depth == 1) { percpu_ref_kill(&q->q_usage_counter); mutex_unlock(&q->mq_freeze_lock); if (queue_is_mq(q)) blk_mq_run_hw_queues(q, false); } else { mutex_unlock(&q->mq_freeze_lock); } } EXPORT_SYMBOL_GPL(blk_freeze_queue_start); void blk_mq_freeze_queue_wait(struct request_queue *q) { wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); } 
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait); int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, unsigned long timeout) { return wait_event_timeout(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter), timeout); } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout); /* * Guarantee no request is in use, so we can change any data structure of * the queue afterward. */ void blk_freeze_queue(struct request_queue *q) { /* * In the !blk_mq case we are only calling this to kill the * q_usage_counter, otherwise this increases the freeze depth * and waits for it to return to zero. For this reason there is * no blk_unfreeze_queue(), and blk_freeze_queue() is not * exported to drivers as the only user for unfreeze is blk_mq. */ blk_freeze_queue_start(q); blk_mq_freeze_queue_wait(q); } void blk_mq_freeze_queue(struct request_queue *q) { /* * ...just an alias to keep freeze and unfreeze actions balanced * in the blk_mq_* namespace */ blk_freeze_queue(q); } EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic) { mutex_lock(&q->mq_freeze_lock); if (force_atomic) q->q_usage_counter.data->force_atomic = true; q->mq_freeze_depth--; WARN_ON_ONCE(q->mq_freeze_depth < 0); if (!q->mq_freeze_depth) { percpu_ref_resurrect(&q->q_usage_counter); wake_up_all(&q->mq_freeze_wq); } mutex_unlock(&q->mq_freeze_lock); } void blk_mq_unfreeze_queue(struct request_queue *q) { __blk_mq_unfreeze_queue(q, false); } EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); /* * FIXME: replace the scsi_internal_device_*block_nowait() calls in the * mpt3sas driver such that this function can be removed. */ void blk_mq_quiesce_queue_nowait(struct request_queue *q) { unsigned long flags; spin_lock_irqsave(&q->queue_lock, flags); if (!q->quiesce_depth++) blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); spin_unlock_irqrestore(&q->queue_lock, flags); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait); /** * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done * @set: tag_set to wait on * * Note: it is driver's responsibility for making sure that quiesce has * been started on or more of the request_queues of the tag_set. This * function only waits for the quiesce on those request_queues that had * the quiesce flag set using blk_mq_quiesce_queue_nowait. */ void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set) { if (set->flags & BLK_MQ_F_BLOCKING) synchronize_srcu(set->srcu); else synchronize_rcu(); } EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done); /** * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished * @q: request queue. * * Note: this function does not prevent that the struct request end_io() * callback function is invoked. Once this function is returned, we make * sure no dispatch can happen until the queue is unquiesced via * blk_mq_unquiesce_queue(). */ void blk_mq_quiesce_queue(struct request_queue *q) { blk_mq_quiesce_queue_nowait(q); /* nothing to wait for non-mq queues */ if (queue_is_mq(q)) blk_mq_wait_quiesce_done(q->tag_set); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue); /* * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue() * @q: request queue. * * This function recovers queue into the state before quiescing * which is done by blk_mq_quiesce_queue. 
*/ void blk_mq_unquiesce_queue(struct request_queue *q) { unsigned long flags; bool run_queue = false; spin_lock_irqsave(&q->queue_lock, flags); if (WARN_ON_ONCE(q->quiesce_depth <= 0)) { ; } else if (!--q->quiesce_depth) { blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); run_queue = true; } spin_unlock_irqrestore(&q->queue_lock, flags); /* dispatch requests which are inserted during quiescing */ if (run_queue) blk_mq_run_hw_queues(q, true); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue); void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; mutex_lock(&set->tag_list_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_quiesce_queue_nowait(q); } blk_mq_wait_quiesce_done(set); mutex_unlock(&set->tag_list_lock); } EXPORT_SYMBOL_GPL(blk_mq_quiesce_tagset); void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; mutex_lock(&set->tag_list_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_unquiesce_queue(q); } mutex_unlock(&set->tag_list_lock); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset); void blk_mq_wake_waiters(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_wakeup_all(hctx->tags, true); } void blk_rq_init(struct request_queue *q, struct request *rq) { memset(rq, 0, sizeof(*rq)); INIT_LIST_HEAD(&rq->queuelist); rq->q = q; rq->__sector = (sector_t) -1; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = BLK_MQ_NO_TAG; rq->start_time_ns = ktime_get_ns(); rq->part = NULL; blk_crypto_rq_set_defaults(rq); } EXPORT_SYMBOL(blk_rq_init); /* Set start and alloc time when the allocated request is actually used */ static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns) { if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); else rq->start_time_ns = 0; #ifdef CONFIG_BLK_RQ_ALLOC_TIME if (blk_queue_rq_alloc_time(rq->q)) rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns; else rq->alloc_time_ns = 0; #endif } static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct blk_mq_tags *tags, unsigned int tag) { struct blk_mq_ctx *ctx = data->ctx; struct blk_mq_hw_ctx *hctx = data->hctx; struct request_queue *q = data->q; struct request *rq = tags->static_rqs[tag]; rq->q = q; rq->mq_ctx = ctx; rq->mq_hctx = hctx; rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) data->rq_flags |= RQF_PM; if (blk_queue_io_stat(q)) data->rq_flags |= RQF_IO_STAT; rq->rq_flags = data->rq_flags; if (data->rq_flags & RQF_SCHED_TAGS) { rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; } rq->timeout = 0; rq->part = NULL; rq->io_start_time_ns = 0; rq->stats_sectors = 0; rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif rq->end_io = NULL; rq->end_io_data = NULL; blk_crypto_rq_set_defaults(rq); INIT_LIST_HEAD(&rq->queuelist); /* tag was already set */ WRITE_ONCE(rq->deadline, 0); req_ref_set(rq, 1); if (rq->rq_flags & RQF_USE_SCHED) { struct elevator_queue *e = data->q->elevator; INIT_HLIST_NODE(&rq->hash); RB_CLEAR_NODE(&rq->rb_node); if (e->type->ops.prepare_request) e->type->ops.prepare_request(rq); } return rq; } static inline struct request * __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data) { unsigned int tag, tag_offset; struct 
blk_mq_tags *tags; struct request *rq; unsigned long tag_mask; int i, nr = 0; tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset); if (unlikely(!tag_mask)) return NULL; tags = blk_mq_tags_from_data(data); for (i = 0; tag_mask; i++) { if (!(tag_mask & (1UL << i))) continue; tag = tag_offset + i; prefetch(tags->static_rqs[tag]); tag_mask &= ~(1UL << i); rq = blk_mq_rq_ctx_init(data, tags, tag); rq_list_add(data->cached_rq, rq); nr++; } if (!(data->rq_flags & RQF_SCHED_TAGS)) blk_mq_add_active_requests(data->hctx, nr); /* caller already holds a reference, add for remainder */ percpu_ref_get_many(&data->q->q_usage_counter, nr - 1); data->nr_tags -= nr; return rq_list_pop(data->cached_rq); } static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) { struct request_queue *q = data->q; u64 alloc_time_ns = 0; struct request *rq; unsigned int tag; /* alloc_time includes depth and tag waits */ if (blk_queue_rq_alloc_time(q)) alloc_time_ns = ktime_get_ns(); if (data->cmd_flags & REQ_NOWAIT) data->flags |= BLK_MQ_REQ_NOWAIT; if (q->elevator) { /* * All requests use scheduler tags when an I/O scheduler is * enabled for the queue. */ data->rq_flags |= RQF_SCHED_TAGS; /* * Flush/passthrough requests are special and go directly to the * dispatch list. */ if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH && !blk_op_is_passthrough(data->cmd_flags)) { struct elevator_mq_ops *ops = &q->elevator->type->ops; WARN_ON_ONCE(data->flags & BLK_MQ_REQ_RESERVED); data->rq_flags |= RQF_USE_SCHED; if (ops->limit_depth) ops->limit_depth(data->cmd_flags, data); } } retry: data->ctx = blk_mq_get_ctx(q); data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); if (!(data->rq_flags & RQF_SCHED_TAGS)) blk_mq_tag_busy(data->hctx); if (data->flags & BLK_MQ_REQ_RESERVED) data->rq_flags |= RQF_RESV; /* * Try batched alloc if we want more than 1 tag. */ if (data->nr_tags > 1) { rq = __blk_mq_alloc_requests_batch(data); if (rq) { blk_mq_rq_time_init(rq, alloc_time_ns); return rq; } data->nr_tags = 1; } /* * Waiting allocations only fail because of an inactive hctx. In that * case just retry the hctx assignment and tag allocation as CPU hotplug * should have migrated us to an online CPU by now. */ tag = blk_mq_get_tag(data); if (tag == BLK_MQ_NO_TAG) { if (data->flags & BLK_MQ_REQ_NOWAIT) return NULL; /* * Give up the CPU and sleep for a random short time to * ensure that thread using a realtime scheduling class * are migrated off the CPU, and thus off the hctx that * is going away. 
*/ msleep(3); goto retry; } if (!(data->rq_flags & RQF_SCHED_TAGS)) blk_mq_inc_active_requests(data->hctx); rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag); blk_mq_rq_time_init(rq, alloc_time_ns); return rq; } static struct request *blk_mq_rq_cache_fill(struct request_queue *q, struct blk_plug *plug, blk_opf_t opf, blk_mq_req_flags_t flags) { struct blk_mq_alloc_data data = { .q = q, .flags = flags, .cmd_flags = opf, .nr_tags = plug->nr_ios, .cached_rq = &plug->cached_rq, }; struct request *rq; if (blk_queue_enter(q, flags)) return NULL; plug->nr_ios = 1; rq = __blk_mq_alloc_requests(&data); if (unlikely(!rq)) blk_queue_exit(q); return rq; } static struct request *blk_mq_alloc_cached_request(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags) { struct blk_plug *plug = current->plug; struct request *rq; if (!plug) return NULL; if (rq_list_empty(plug->cached_rq)) { if (plug->nr_ios == 1) return NULL; rq = blk_mq_rq_cache_fill(q, plug, opf, flags); if (!rq) return NULL; } else { rq = rq_list_peek(&plug->cached_rq); if (!rq || rq->q != q) return NULL; if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type) return NULL; if (op_is_flush(rq->cmd_flags) != op_is_flush(opf)) return NULL; plug->cached_rq = rq_list_next(rq); blk_mq_rq_time_init(rq, 0); } rq->cmd_flags = opf; INIT_LIST_HEAD(&rq->queuelist); return rq; } struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags) { struct request *rq; rq = blk_mq_alloc_cached_request(q, opf, flags); if (!rq) { struct blk_mq_alloc_data data = { .q = q, .flags = flags, .cmd_flags = opf, .nr_tags = 1, }; int ret; ret = blk_queue_enter(q, flags); if (ret) return ERR_PTR(ret); rq = __blk_mq_alloc_requests(&data); if (!rq) goto out_queue_exit; } rq->__data_len = 0; rq->__sector = (sector_t) -1; rq->bio = rq->biotail = NULL; return rq; out_queue_exit: blk_queue_exit(q); return ERR_PTR(-EWOULDBLOCK); } EXPORT_SYMBOL(blk_mq_alloc_request); struct request *blk_mq_alloc_request_hctx(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx) { struct blk_mq_alloc_data data = { .q = q, .flags = flags, .cmd_flags = opf, .nr_tags = 1, }; u64 alloc_time_ns = 0; struct request *rq; unsigned int cpu; unsigned int tag; int ret; /* alloc_time includes depth and tag waits */ if (blk_queue_rq_alloc_time(q)) alloc_time_ns = ktime_get_ns(); /* * If the tag allocator sleeps we could get an allocation for a * different hardware context. No need to complicate the low level * allocator for this for the rare use case of a command tied to * a specific queue. */ if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)) || WARN_ON_ONCE(!(flags & BLK_MQ_REQ_RESERVED))) return ERR_PTR(-EINVAL); if (hctx_idx >= q->nr_hw_queues) return ERR_PTR(-EIO); ret = blk_queue_enter(q, flags); if (ret) return ERR_PTR(ret); /* * Check if the hardware context is actually mapped to anything. * If not tell the caller that it should skip this queue. 
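 * (Getting ERR_PTR(-EXDEV) back therefore does not mean the request was
 * malformed, only that this hardware queue currently has no usable
 * mapping.)
 *
 * Illustrative call sketch (not from this file, "qid" is hypothetical);
 * note that the WARNs above require both flags to be set:
 *
 *	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
 *			BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED, qid);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);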
*/ ret = -EXDEV; data.hctx = xa_load(&q->hctx_table, hctx_idx); if (!blk_mq_hw_queue_mapped(data.hctx)) goto out_queue_exit; cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); if (cpu >= nr_cpu_ids) goto out_queue_exit; data.ctx = __blk_mq_get_ctx(q, cpu); if (q->elevator) data.rq_flags |= RQF_SCHED_TAGS; else blk_mq_tag_busy(data.hctx); if (flags & BLK_MQ_REQ_RESERVED) data.rq_flags |= RQF_RESV; ret = -EWOULDBLOCK; tag = blk_mq_get_tag(&data); if (tag == BLK_MQ_NO_TAG) goto out_queue_exit; if (!(data.rq_flags & RQF_SCHED_TAGS)) blk_mq_inc_active_requests(data.hctx); rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag); blk_mq_rq_time_init(rq, alloc_time_ns); rq->__data_len = 0; rq->__sector = (sector_t) -1; rq->bio = rq->biotail = NULL; return rq; out_queue_exit: blk_queue_exit(q); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); static void blk_mq_finish_request(struct request *rq) { struct request_queue *q = rq->q; if (rq->rq_flags & RQF_USE_SCHED) { q->elevator->type->ops.finish_request(rq); /* * For postflush request that may need to be * completed twice, we should clear this flag * to avoid double finish_request() on the rq. */ rq->rq_flags &= ~RQF_USE_SCHED; } } static void __blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; const int sched_tag = rq->internal_tag; blk_crypto_free_request(rq); blk_pm_mark_last_busy(rq); rq->mq_hctx = NULL; if (rq->tag != BLK_MQ_NO_TAG) { blk_mq_dec_active_requests(hctx); blk_mq_put_tag(hctx->tags, ctx, rq->tag); } if (sched_tag != BLK_MQ_NO_TAG) blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); blk_mq_sched_restart(hctx); blk_queue_exit(q); } void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; blk_mq_finish_request(rq); if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->disk->bdi); rq_qos_done(q, rq); WRITE_ONCE(rq->state, MQ_RQ_IDLE); if (req_ref_put_and_test(rq)) __blk_mq_free_request(rq); } EXPORT_SYMBOL_GPL(blk_mq_free_request); void blk_mq_free_plug_rqs(struct blk_plug *plug) { struct request *rq; while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) blk_mq_free_request(rq); } void blk_dump_rq_flags(struct request *rq, char *msg) { printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, rq->q->disk ? rq->q->disk->disk_name : "?", (__force unsigned long long) rq->cmd_flags); printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); printk(KERN_INFO " bio %p, biotail %p, len %u\n", rq->bio, rq->biotail, blk_rq_bytes(rq)); } EXPORT_SYMBOL(blk_dump_rq_flags); static void req_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, blk_status_t error) { if (unlikely(error)) { bio->bi_status = error; } else if (req_op(rq) == REQ_OP_ZONE_APPEND) { /* * Partial zone append completions cannot be supported as the * BIO fragments may end up not being written sequentially. * For such case, force the completed nbytes to be equal to * the BIO size so that bio_advance() sets the BIO remaining * size to 0 and we end up calling bio_endio() before returning. 
*/ if (bio->bi_iter.bi_size != nbytes) { bio->bi_status = BLK_STS_IOERR; nbytes = bio->bi_iter.bi_size; } else { bio->bi_iter.bi_sector = rq->__sector; } } bio_advance(bio, nbytes); if (unlikely(rq->rq_flags & RQF_QUIET)) bio_set_flag(bio, BIO_QUIET); /* don't actually finish bio if it's part of flush sequence */ if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) bio_endio(bio); } static void blk_account_io_completion(struct request *req, unsigned int bytes) { if (req->part && blk_do_io_stat(req)) { const int sgrp = op_stat_group(req_op(req)); part_stat_lock(); part_stat_add(req->part, sectors[sgrp], bytes >> 9); part_stat_unlock(); } } static void blk_print_req_error(struct request *req, blk_status_t status) { printk_ratelimited(KERN_ERR "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " "phys_seg %u prio class %u\n", blk_status_to_str(status), req->q->disk ? req->q->disk->disk_name : "?", blk_rq_pos(req), (__force u32)req_op(req), blk_op_str(req_op(req)), (__force u32)(req->cmd_flags & ~REQ_OP_MASK), req->nr_phys_segments, IOPRIO_PRIO_CLASS(req->ioprio)); } /* * Fully end IO on a request. Does not support partial completions, or * errors. */ static void blk_complete_request(struct request *req) { const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0; int total_bytes = blk_rq_bytes(req); struct bio *bio = req->bio; trace_block_rq_complete(req, BLK_STS_OK, total_bytes); if (!bio) return; #ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ) req->q->integrity.profile->complete_fn(req, total_bytes); #endif /* * Upper layers may call blk_crypto_evict_key() anytime after the last * bio_endio(). Therefore, the keyslot must be released before that. */ blk_crypto_rq_put_keyslot(req); blk_account_io_completion(req, total_bytes); do { struct bio *next = bio->bi_next; /* Completion has already been traced */ bio_clear_flag(bio, BIO_TRACE_COMPLETION); if (req_op(req) == REQ_OP_ZONE_APPEND) bio->bi_iter.bi_sector = req->__sector; if (!is_flush) bio_endio(bio); bio = next; } while (bio); /* * Reset counters so that the request stacking driver * can find how many bytes remain in the request * later. */ if (!req->end_io) { req->bio = NULL; req->__data_len = 0; } } /** * blk_update_request - Complete multiple bytes without completing the request * @req: the request being processed * @error: block status code * @nr_bytes: number of bytes to complete for @req * * Description: * Ends I/O on a number of bytes attached to @req, but doesn't complete * the request structure even if @req doesn't have leftover. * If @req has leftover, sets it up for the next range of segments. * * Passing the result of blk_rq_bytes() as @nr_bytes guarantees * %false return from this function. * * Note: * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function * except in the consistency check at the end of this function. * * Return: * %false - this request doesn't have any more data * %true - this request has more data **/ bool blk_update_request(struct request *req, blk_status_t error, unsigned int nr_bytes) { int total_bytes; trace_block_rq_complete(req, error, nr_bytes); if (!req->bio) return false; #ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && error == BLK_STS_OK) req->q->integrity.profile->complete_fn(req, nr_bytes); #endif /* * Upper layers may call blk_crypto_evict_key() anytime after the last * bio_endio(). Therefore, the keyslot must be released before that. 
*/ if (blk_crypto_rq_has_keyslot(req) && nr_bytes >= blk_rq_bytes(req)) __blk_crypto_rq_put_keyslot(req); if (unlikely(error && !blk_rq_is_passthrough(req) && !(req->rq_flags & RQF_QUIET)) && !test_bit(GD_DEAD, &req->q->disk->state)) { blk_print_req_error(req, error); trace_block_rq_error(req, error, nr_bytes); } blk_account_io_completion(req, nr_bytes); total_bytes = 0; while (req->bio) { struct bio *bio = req->bio; unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); if (bio_bytes == bio->bi_iter.bi_size) req->bio = bio->bi_next; /* Completion has already been traced */ bio_clear_flag(bio, BIO_TRACE_COMPLETION); req_bio_endio(req, bio, bio_bytes, error); total_bytes += bio_bytes; nr_bytes -= bio_bytes; if (!nr_bytes) break; } /* * completely done */ if (!req->bio) { /* * Reset counters so that the request stacking driver * can find how many bytes remain in the request * later. */ req->__data_len = 0; return false; } req->__data_len -= total_bytes; /* update sector only for requests with clear definition of sector */ if (!blk_rq_is_passthrough(req)) req->__sector += total_bytes >> 9; /* mixed attributes always follow the first bio */ if (req->rq_flags & RQF_MIXED_MERGE) { req->cmd_flags &= ~REQ_FAILFAST_MASK; req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; } if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { /* * If total number of sectors is less than the first segment * size, something has gone terribly wrong. */ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { blk_dump_rq_flags(req, "request botched"); req->__data_len = blk_rq_cur_bytes(req); } /* recalculate the number of segments */ req->nr_phys_segments = blk_recalc_rq_segments(req); } return true; } EXPORT_SYMBOL_GPL(blk_update_request); static inline void blk_account_io_done(struct request *req, u64 now) { trace_block_io_done(req); /* * Account IO completion. flush_rq isn't accounted as a * normal IO on queueing nor completion. Accounting the * containing request is enough. */ if (blk_do_io_stat(req) && req->part && !(req->rq_flags & RQF_FLUSH_SEQ)) { const int sgrp = op_stat_group(req_op(req)); part_stat_lock(); update_io_ticks(req->part, jiffies, true); part_stat_inc(req->part, ios[sgrp]); part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); part_stat_unlock(); } } static inline void blk_account_io_start(struct request *req) { trace_block_io_start(req); if (blk_do_io_stat(req)) { /* * All non-passthrough requests are created from a bio with one * exception: when a flush command that is part of a flush sequence * generated by the state machine in blk-flush.c is cloned onto the * lower device by dm-multipath we can get here without a bio. 
*/ if (req->bio) req->part = req->bio->bi_bdev; else req->part = req->q->disk->part0; part_stat_lock(); update_io_ticks(req->part, jiffies, false); part_stat_unlock(); } } static inline void __blk_mq_end_request_acct(struct request *rq, u64 now) { if (rq->rq_flags & RQF_STATS) blk_stat_add(rq, now); blk_mq_sched_completed_request(rq, now); blk_account_io_done(rq, now); } inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { if (blk_mq_need_time_stamp(rq)) __blk_mq_end_request_acct(rq, ktime_get_ns()); blk_mq_finish_request(rq); if (rq->end_io) { rq_qos_done(rq->q, rq); if (rq->end_io(rq, error) == RQ_END_IO_FREE) blk_mq_free_request(rq); } else { blk_mq_free_request(rq); } } EXPORT_SYMBOL(__blk_mq_end_request); void blk_mq_end_request(struct request *rq, blk_status_t error) { if (blk_update_request(rq, error, blk_rq_bytes(rq))) BUG(); __blk_mq_end_request(rq, error); } EXPORT_SYMBOL(blk_mq_end_request); #define TAG_COMP_BATCH 32 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, int *tag_array, int nr_tags) { struct request_queue *q = hctx->queue; blk_mq_sub_active_requests(hctx, nr_tags); blk_mq_put_tags(hctx->tags, tag_array, nr_tags); percpu_ref_put_many(&q->q_usage_counter, nr_tags); } void blk_mq_end_request_batch(struct io_comp_batch *iob) { int tags[TAG_COMP_BATCH], nr_tags = 0; struct blk_mq_hw_ctx *cur_hctx = NULL; struct request *rq; u64 now = 0; if (iob->need_ts) now = ktime_get_ns(); while ((rq = rq_list_pop(&iob->req_list)) != NULL) { prefetch(rq->bio); prefetch(rq->rq_next); blk_complete_request(rq); if (iob->need_ts) __blk_mq_end_request_acct(rq, now); blk_mq_finish_request(rq); rq_qos_done(rq->q, rq); /* * If end_io handler returns NONE, then it still has * ownership of the request. */ if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE) continue; WRITE_ONCE(rq->state, MQ_RQ_IDLE); if (!req_ref_put_and_test(rq)) continue; blk_crypto_free_request(rq); blk_pm_mark_last_busy(rq); if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) { if (cur_hctx) blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); nr_tags = 0; cur_hctx = rq->mq_hctx; } tags[nr_tags++] = rq->tag; } if (nr_tags) blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags); } EXPORT_SYMBOL_GPL(blk_mq_end_request_batch); static void blk_complete_reqs(struct llist_head *list) { struct llist_node *entry = llist_reverse_order(llist_del_all(list)); struct request *rq, *next; llist_for_each_entry_safe(rq, next, entry, ipi_list) rq->q->mq_ops->complete(rq); } static __latent_entropy void blk_done_softirq(struct softirq_action *h) { blk_complete_reqs(this_cpu_ptr(&blk_cpu_done)); } static int blk_softirq_cpu_dead(unsigned int cpu) { blk_complete_reqs(&per_cpu(blk_cpu_done, cpu)); return 0; } static void __blk_mq_complete_request_remote(void *data) { __raise_softirq_irqoff(BLOCK_SOFTIRQ); } static inline bool blk_mq_complete_need_ipi(struct request *rq) { int cpu = raw_smp_processor_id(); if (!IS_ENABLED(CONFIG_SMP) || !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) return false; /* * With force threaded interrupts enabled, raising softirq from an SMP * function call will always result in waking the ksoftirqd thread. * This is probably worse than completing the request on a different * cache domain. */ if (force_irqthreads()) return false; /* same CPU or cache domain? 
Complete locally */ if (cpu == rq->mq_ctx->cpu || (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && cpus_share_cache(cpu, rq->mq_ctx->cpu))) return false; /* don't try to IPI to an offline CPU */ return cpu_online(rq->mq_ctx->cpu); } static void blk_mq_complete_send_ipi(struct request *rq) { unsigned int cpu; cpu = rq->mq_ctx->cpu; if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu))) smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu)); } static void blk_mq_raise_softirq(struct request *rq) { struct llist_head *list; preempt_disable(); list = this_cpu_ptr(&blk_cpu_done); if (llist_add(&rq->ipi_list, list)) raise_softirq(BLOCK_SOFTIRQ); preempt_enable(); } bool blk_mq_complete_request_remote(struct request *rq) { WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); /* * For request which hctx has only one ctx mapping, * or a polled request, always complete locally, * it's pointless to redirect the completion. */ if ((rq->mq_hctx->nr_ctx == 1 && rq->mq_ctx->cpu == raw_smp_processor_id()) || rq->cmd_flags & REQ_POLLED) return false; if (blk_mq_complete_need_ipi(rq)) { blk_mq_complete_send_ipi(rq); return true; } if (rq->q->nr_hw_queues == 1) { blk_mq_raise_softirq(rq); return true; } return false; } EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote); /** * blk_mq_complete_request - end I/O on a request * @rq: the request being processed * * Description: * Complete a request by scheduling the ->complete_rq operation. **/ void blk_mq_complete_request(struct request *rq) { if (!blk_mq_complete_request_remote(rq)) rq->q->mq_ops->complete(rq); } EXPORT_SYMBOL(blk_mq_complete_request); /** * blk_mq_start_request - Start processing a request * @rq: Pointer to request to be started * * Function used by device drivers to notify the block layer that a request * is going to be processed now, so blk layer can do proper initializations * such as starting the timeout timer. */ void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; trace_block_rq_issue(rq); if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) && !blk_rq_is_passthrough(rq)) { rq->io_start_time_ns = ktime_get_ns(); rq->stats_sectors = blk_rq_sectors(rq); rq->rq_flags |= RQF_STATS; rq_qos_issue(q, rq); } WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); blk_add_timer(rq); WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); rq->mq_hctx->tags->rqs[rq->tag] = rq; #ifdef CONFIG_BLK_DEV_INTEGRITY if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) q->integrity.profile->prepare_fn(rq); #endif if (rq->bio && rq->bio->bi_opf & REQ_POLLED) WRITE_ONCE(rq->bio->bi_cookie, rq->mq_hctx->queue_num); } EXPORT_SYMBOL(blk_mq_start_request); /* * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple * queues. This is important for md arrays to benefit from merging * requests. 
*/ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) { if (plug->multiple_queues) return BLK_MAX_REQUEST_COUNT * 2; return BLK_MAX_REQUEST_COUNT; } static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) { struct request *last = rq_list_peek(&plug->mq_list); if (!plug->rq_count) { trace_block_plug(rq->q); } else if (plug->rq_count >= blk_plug_max_rq_count(plug) || (!blk_queue_nomerges(rq->q) && blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { blk_mq_flush_plug_list(plug, false); last = NULL; trace_block_plug(rq->q); } if (!plug->multiple_queues && last && last->q != rq->q) plug->multiple_queues = true; /* * Any request allocated from sched tags can't be issued to * ->queue_rqs() directly */ if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS)) plug->has_elevator = true; rq->rq_next = NULL; rq_list_add(&plug->mq_list, rq); plug->rq_count++; } /** * blk_execute_rq_nowait - insert a request to I/O scheduler for execution * @rq: request to insert * @at_head: insert request at head or tail of queue * * Description: * Insert a fully prepared request at the back of the I/O scheduler queue * for execution. Don't wait for completion. * * Note: * This function will invoke @done directly if the queue is dead. */ void blk_execute_rq_nowait(struct request *rq, bool at_head) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; WARN_ON(irqs_disabled()); WARN_ON(!blk_rq_is_passthrough(rq)); blk_account_io_start(rq); /* * As plugging can be enabled for passthrough requests on a zoned * device, directly accessing the plug instead of using blk_mq_plug() * should not have any consequences. */ if (current->plug && !at_head) { blk_add_rq_to_plug(current->plug, rq); return; } blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0); blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); } EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); struct blk_rq_wait { struct completion done; blk_status_t ret; }; static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret) { struct blk_rq_wait *wait = rq->end_io_data; wait->ret = ret; complete(&wait->done); return RQ_END_IO_NONE; } bool blk_rq_is_poll(struct request *rq) { if (!rq->mq_hctx) return false; if (rq->mq_hctx->type != HCTX_TYPE_POLL) return false; return true; } EXPORT_SYMBOL_GPL(blk_rq_is_poll); static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { blk_hctx_poll(rq->q, rq->mq_hctx, NULL, 0); cond_resched(); } while (!completion_done(wait)); } /** * blk_execute_rq - insert a request into queue for execution * @rq: request to insert * @at_head: insert request at head or tail of queue * * Description: * Insert a fully prepared request at the back of the I/O scheduler queue * for execution and wait for completion. * Return: The blk_status_t result provided to blk_mq_end_request(). */ blk_status_t blk_execute_rq(struct request *rq, bool at_head) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; struct blk_rq_wait wait = { .done = COMPLETION_INITIALIZER_ONSTACK(wait.done), }; WARN_ON(irqs_disabled()); WARN_ON(!blk_rq_is_passthrough(rq)); rq->end_io_data = &wait; rq->end_io = blk_end_sync_rq; blk_account_io_start(rq); blk_mq_insert_request(rq, at_head ? 
BLK_MQ_INSERT_AT_HEAD : 0); blk_mq_run_hw_queue(hctx, false); if (blk_rq_is_poll(rq)) { blk_rq_poll_completion(rq, &wait.done); } else { /* * Prevent hang_check timer from firing at us during very long * I/O */ unsigned long hang_check = sysctl_hung_task_timeout_secs; if (hang_check) while (!wait_for_completion_io_timeout(&wait.done, hang_check * (HZ/2))) ; else wait_for_completion_io(&wait.done); } return wait.ret; } EXPORT_SYMBOL(blk_execute_rq); static void __blk_mq_requeue_request(struct request *rq) { struct request_queue *q = rq->q; blk_mq_put_driver_tag(rq); trace_block_rq_requeue(rq); rq_qos_requeue(q, rq); if (blk_mq_request_started(rq)) { WRITE_ONCE(rq->state, MQ_RQ_IDLE); rq->rq_flags &= ~RQF_TIMED_OUT; } } void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) { struct request_queue *q = rq->q; unsigned long flags; __blk_mq_requeue_request(rq); /* this request will be re-inserted to io scheduler queue */ blk_mq_sched_requeue_request(rq); spin_lock_irqsave(&q->requeue_lock, flags); list_add_tail(&rq->queuelist, &q->requeue_list); spin_unlock_irqrestore(&q->requeue_lock, flags); if (kick_requeue_list) blk_mq_kick_requeue_list(q); } EXPORT_SYMBOL(blk_mq_requeue_request); static void blk_mq_requeue_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, requeue_work.work); LIST_HEAD(rq_list); LIST_HEAD(flush_list); struct request *rq; spin_lock_irq(&q->requeue_lock); list_splice_init(&q->requeue_list, &rq_list); list_splice_init(&q->flush_list, &flush_list); spin_unlock_irq(&q->requeue_lock); while (!list_empty(&rq_list)) { rq = list_entry(rq_list.next, struct request, queuelist); /* * If RQF_DONTPREP ist set, the request has been started by the * driver already and might have driver-specific data allocated * already. Insert it into the hctx dispatch list to avoid * block layer merges for the request. */ if (rq->rq_flags & RQF_DONTPREP) { list_del_init(&rq->queuelist); blk_mq_request_bypass_insert(rq, 0); } else { list_del_init(&rq->queuelist); blk_mq_insert_request(rq, BLK_MQ_INSERT_AT_HEAD); } } while (!list_empty(&flush_list)) { rq = list_entry(flush_list.next, struct request, queuelist); list_del_init(&rq->queuelist); blk_mq_insert_request(rq, 0); } blk_mq_run_hw_queues(q, false); } void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs) { kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); static bool blk_is_flush_data_rq(struct request *rq) { return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq); } static bool blk_mq_rq_inflight(struct request *rq, void *priv) { /* * If we find a request that isn't idle we know the queue is busy * as it's checked in the iter. * Return false to stop the iteration. 
* * In case of queue quiesce, if one flush data request is completed, * don't count it as inflight given the flush sequence is suspended, * and the original flush data request is invisible to driver, just * like other pending requests because of quiesce */ if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) && blk_is_flush_data_rq(rq) && blk_mq_request_completed(rq))) { bool *busy = priv; *busy = true; return false; } return true; } bool blk_mq_queue_inflight(struct request_queue *q) { bool busy = false; blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); return busy; } EXPORT_SYMBOL_GPL(blk_mq_queue_inflight); static void blk_mq_rq_timed_out(struct request *req) { req->rq_flags |= RQF_TIMED_OUT; if (req->q->mq_ops->timeout) { enum blk_eh_timer_return ret; ret = req->q->mq_ops->timeout(req); if (ret == BLK_EH_DONE) return; WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER); } blk_add_timer(req); } struct blk_expired_data { bool has_timedout_rq; unsigned long next; unsigned long timeout_start; }; static bool blk_mq_req_expired(struct request *rq, struct blk_expired_data *expired) { unsigned long deadline; if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) return false; if (rq->rq_flags & RQF_TIMED_OUT) return false; deadline = READ_ONCE(rq->deadline); if (time_after_eq(expired->timeout_start, deadline)) return true; if (expired->next == 0) expired->next = deadline; else if (time_after(expired->next, deadline)) expired->next = deadline; return false; } void blk_mq_put_rq_ref(struct request *rq) { if (is_flush_rq(rq)) { if (rq->end_io(rq, 0) == RQ_END_IO_FREE) blk_mq_free_request(rq); } else if (req_ref_put_and_test(rq)) { __blk_mq_free_request(rq); } } static bool blk_mq_check_expired(struct request *rq, void *priv) { struct blk_expired_data *expired = priv; /* * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot * be reallocated underneath the timeout handler's processing, then * the expire check is reliable. If the request is not expired, then * it was completed and reallocated as a new request after returning * from blk_mq_check_expired(). */ if (blk_mq_req_expired(rq, expired)) { expired->has_timedout_rq = true; return false; } return true; } static bool blk_mq_handle_expired(struct request *rq, void *priv) { struct blk_expired_data *expired = priv; if (blk_mq_req_expired(rq, expired)) blk_mq_rq_timed_out(rq); return true; } static void blk_mq_timeout_work(struct work_struct *work) { struct request_queue *q = container_of(work, struct request_queue, timeout_work); struct blk_expired_data expired = { .timeout_start = jiffies, }; struct blk_mq_hw_ctx *hctx; unsigned long i; /* A deadlock might occur if a request is stuck requiring a * timeout at the same time a queue freeze is waiting * completion, since the timeout code would not be able to * acquire the queue reference here. * * That's why we don't use blk_queue_enter here; instead, we use * percpu_ref_tryget directly, because we need to be able to * obtain a reference even in the short window between the queue * starting to freeze, by dropping the first reference in * blk_freeze_queue_start, and the moment the last request is * consumed, marked by the instant q_usage_counter reaches * zero. */ if (!percpu_ref_tryget(&q->q_usage_counter)) return; /* check if there is any timed-out request */ blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &expired); if (expired.has_timedout_rq) { /* * Before walking tags, we must ensure any submit started * before the current time has finished. 
Since the submit * uses srcu or rcu, wait for a synchronization point to * ensure all running submits have finished */ blk_mq_wait_quiesce_done(q->tag_set); expired.next = 0; blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired); } if (expired.next != 0) { mod_timer(&q->timeout, expired.next); } else { /* * Request timeouts are handled as a forward rolling timer. If * we end up here it means that no requests are pending and * also that no request has been pending for a while. Mark * each hctx as idle. */ queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); } } blk_queue_exit(q); } struct flush_busy_ctx_data { struct blk_mq_hw_ctx *hctx; struct list_head *list; }; static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) { struct flush_busy_ctx_data *flush_data = data; struct blk_mq_hw_ctx *hctx = flush_data->hctx; struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; enum hctx_type type = hctx->type; spin_lock(&ctx->lock); list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); sbitmap_clear_bit(sb, bitnr); spin_unlock(&ctx->lock); return true; } /* * Process software queues that have been marked busy, splicing them * to the for-dispatch */ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) { struct flush_busy_ctx_data data = { .hctx = hctx, .list = list, }; sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); } EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs); struct dispatch_rq_data { struct blk_mq_hw_ctx *hctx; struct request *rq; }; static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr, void *data) { struct dispatch_rq_data *dispatch_data = data; struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; enum hctx_type type = hctx->type; spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_lists[type])) { dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); list_del_init(&dispatch_data->rq->queuelist); if (list_empty(&ctx->rq_lists[type])) sbitmap_clear_bit(sb, bitnr); } spin_unlock(&ctx->lock); return !dispatch_data->rq; } struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *start) { unsigned off = start ? start->index_hw[hctx->type] : 0; struct dispatch_rq_data data = { .hctx = hctx, .rq = NULL, }; __sbitmap_for_each_set(&hctx->ctx_map, off, dispatch_rq_from_ctx, &data); return data.rq; } bool __blk_mq_alloc_driver_tag(struct request *rq) { struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; int tag; blk_mq_tag_busy(rq->mq_hctx); if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { bt = &rq->mq_hctx->tags->breserved_tags; tag_offset = 0; } else { if (!hctx_may_queue(rq->mq_hctx, bt)) return false; } tag = __sbitmap_queue_get(bt); if (tag == BLK_MQ_NO_TAG) return false; rq->tag = tag + tag_offset; blk_mq_inc_active_requests(rq->mq_hctx); return true; } static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) { struct blk_mq_hw_ctx *hctx; hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); spin_lock(&hctx->dispatch_wait_lock); if (!list_empty(&wait->entry)) { struct sbitmap_queue *sbq; list_del_init(&wait->entry); sbq = &hctx->tags->bitmap_tags; atomic_dec(&sbq->ws_active); } spin_unlock(&hctx->dispatch_wait_lock); blk_mq_run_hw_queue(hctx, true); return 1; } /* * Mark us waiting for a tag. 
For shared tags, this involves hooking us into * the tag wakeups. For non-shared tags, we can simply mark us needing a * restart. For both cases, take care to check the condition again after * marking us as waiting. */ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) { struct sbitmap_queue *sbq; struct wait_queue_head *wq; wait_queue_entry_t *wait; bool ret; if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && !(blk_mq_is_shared_tags(hctx->flags))) { blk_mq_sched_mark_restart_hctx(hctx); /* * It's possible that a tag was freed in the window between the * allocation failure and adding the hardware queue to the wait * queue. * * Don't clear RESTART here, someone else could have set it. * At most this will cost an extra queue run. */ return blk_mq_get_driver_tag(rq); } wait = &hctx->dispatch_wait; if (!list_empty_careful(&wait->entry)) return false; if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) sbq = &hctx->tags->breserved_tags; else sbq = &hctx->tags->bitmap_tags; wq = &bt_wait_ptr(sbq, hctx)->wait; spin_lock_irq(&wq->lock); spin_lock(&hctx->dispatch_wait_lock); if (!list_empty(&wait->entry)) { spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return false; } atomic_inc(&sbq->ws_active); wait->flags &= ~WQ_FLAG_EXCLUSIVE; __add_wait_queue(wq, wait); /* * Add one explicit barrier since blk_mq_get_driver_tag() may * not imply barrier in case of failure. * * Order adding us to wait queue and allocating driver tag. * * The pair is the one implied in sbitmap_queue_wake_up() which * orders clearing sbitmap tag bits and waitqueue_active() in * __sbitmap_queue_wake_up(), since waitqueue_active() is lockless * * Otherwise, re-order of adding wait queue and getting driver tag * may cause __sbitmap_queue_wake_up() to wake up nothing because * the waitqueue_active() may not observe us in wait queue. */ smp_mb(); /* * It's possible that a tag was freed in the window between the * allocation failure and adding the hardware queue to the wait * queue. */ ret = blk_mq_get_driver_tag(rq); if (!ret) { spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return false; } /* * We got a tag, remove ourselves from the wait queue to ensure * someone else gets the wakeup. 
*/ list_del_init(&wait->entry); atomic_dec(&sbq->ws_active); spin_unlock(&hctx->dispatch_wait_lock); spin_unlock_irq(&wq->lock); return true; } #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT 8 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR 4 /* * Update dispatch busy with the Exponential Weighted Moving Average(EWMA): * - EWMA is one simple way to compute running average value * - weight(7/8 and 1/8) is applied so that it can decrease exponentially * - take 4 as factor for avoiding to get too small(0) result, and this * factor doesn't matter because EWMA decreases exponentially */ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) { unsigned int ewma; ewma = hctx->dispatch_busy; if (!ewma && !busy) return; ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1; if (busy) ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR; ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT; hctx->dispatch_busy = ewma; } #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */ static void blk_mq_handle_dev_resource(struct request *rq, struct list_head *list) { list_add(&rq->queuelist, list); __blk_mq_requeue_request(rq); } static void blk_mq_handle_zone_resource(struct request *rq, struct list_head *zone_list) { /* * If we end up here it is because we cannot dispatch a request to a * specific zone due to LLD level zone-write locking or other zone * related resource not being available. In this case, set the request * aside in zone_list for retrying it later. */ list_add(&rq->queuelist, zone_list); __blk_mq_requeue_request(rq); } enum prep_dispatch { PREP_DISPATCH_OK, PREP_DISPATCH_NO_TAG, PREP_DISPATCH_NO_BUDGET, }; static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, bool need_budget) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; int budget_token = -1; if (need_budget) { budget_token = blk_mq_get_dispatch_budget(rq->q); if (budget_token < 0) { blk_mq_put_driver_tag(rq); return PREP_DISPATCH_NO_BUDGET; } blk_mq_set_rq_budget_token(rq, budget_token); } if (!blk_mq_get_driver_tag(rq)) { /* * The initial allocation attempt failed, so we need to * rerun the hardware queue when a tag is freed. The * waitqueue takes care of that. If the queue is run * before we add this entry back on the dispatch list, * we'll re-run it below. */ if (!blk_mq_mark_tag_wait(hctx, rq)) { /* * All budgets not got from this function will be put * together during handling partial dispatch */ if (need_budget) blk_mq_put_dispatch_budget(rq->q, budget_token); return PREP_DISPATCH_NO_TAG; } } return PREP_DISPATCH_OK; } /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */ static void blk_mq_release_budgets(struct request_queue *q, struct list_head *list) { struct request *rq; list_for_each_entry(rq, list, queuelist) { int budget_token = blk_mq_get_rq_budget_token(rq); if (budget_token >= 0) blk_mq_put_dispatch_budget(q, budget_token); } } /* * blk_mq_commit_rqs will notify driver using bd->last that there is no * more requests. (See comment in struct blk_mq_ops for commit_rqs for * details) * Attention, we should explicitly call this in unusual cases: * 1) did not queue everything initially scheduled to queue * 2) the last attempt to queue a request failed */ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued, bool from_schedule) { if (hctx->queue->mq_ops->commit_rqs && queued) { trace_block_unplug(hctx->queue, queued, !from_schedule); hctx->queue->mq_ops->commit_rqs(hctx); } } /* * Returns true if we did some work AND can potentially do more. 
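 *
 * The status codes handled in the switch below come straight from the
 * driver's ->queue_rq() callback. Illustrative sketch (not from this
 * file, the foo_* helpers are hypothetical) of a minimal implementation
 * feeding this loop:
 *
 *	static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					 const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		if (!foo_hw_slot_available(hctx->driver_data))
 *			return BLK_STS_RESOURCE;	(retried later)
 *		blk_mq_start_request(rq);
 *		foo_hw_submit(rq, bd->last);
 *		return BLK_STS_OK;
 *	}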
*/ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, unsigned int nr_budgets) { enum prep_dispatch prep; struct request_queue *q = hctx->queue; struct request *rq; int queued; blk_status_t ret = BLK_STS_OK; LIST_HEAD(zone_list); bool needs_resource = false; if (list_empty(list)) return false; /* * Now process all the entries, sending them to the driver. */ queued = 0; do { struct blk_mq_queue_data bd; rq = list_first_entry(list, struct request, queuelist); WARN_ON_ONCE(hctx != rq->mq_hctx); prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); if (prep != PREP_DISPATCH_OK) break; list_del_init(&rq->queuelist); bd.rq = rq; bd.last = list_empty(list); /* * once the request is queued to lld, no need to cover the * budget any more */ if (nr_budgets) nr_budgets--; ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: needs_resource = true; fallthrough; case BLK_STS_DEV_RESOURCE: blk_mq_handle_dev_resource(rq, list); goto out; case BLK_STS_ZONE_RESOURCE: /* * Move the request to zone_list and keep going through * the dispatch list to find more requests the drive can * accept. */ blk_mq_handle_zone_resource(rq, &zone_list); needs_resource = true; break; default: blk_mq_end_request(rq, ret); } } while (!list_empty(list)); out: if (!list_empty(&zone_list)) list_splice_tail_init(&zone_list, list); /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. */ if (!list_empty(list) || ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); /* * Any items that need requeuing? Stuff them into hctx->dispatch, * that is where we will continue on next queue run. */ if (!list_empty(list)) { bool needs_restart; /* For non-shared tags, the RESTART check will suffice */ bool no_tag = prep == PREP_DISPATCH_NO_TAG && ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) || blk_mq_is_shared_tags(hctx->flags)); if (nr_budgets) blk_mq_release_budgets(q, list); spin_lock(&hctx->lock); list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); /* * Order adding requests to hctx->dispatch and checking * SCHED_RESTART flag. The pair of this smp_mb() is the one * in blk_mq_sched_restart(). Avoid restart code path to * miss the new added requests to hctx->dispatch, meantime * SCHED_RESTART is observed here. */ smp_mb(); /* * If SCHED_RESTART was set by the caller of this function and * it is no longer set that means that it was cleared by another * thread and hence that a queue rerun is needed. * * If 'no_tag' is set, that means that we failed getting * a driver tag with an I/O scheduler attached. If our dispatch * waitqueue is no longer active, ensure that we run the queue * AFTER adding our entries back to the list. * * If no I/O scheduler has been configured it is possible that * the hardware queue got stopped and restarted before requests * were pushed back onto the dispatch list. Rerun the queue to * avoid starvation. Notes: * - blk_mq_run_hw_queue() checks whether or not a queue has * been stopped before rerunning a queue. * - Some but not all block drivers stop a queue before * returning BLK_STS_RESOURCE. Two exceptions are scsi-mq * and dm-rq. * * If driver returns BLK_STS_RESOURCE and SCHED_RESTART * bit is set, run queue after a delay to avoid IO stalls * that could otherwise occur if the queue is idle. We'll do * similar if we couldn't get budget or couldn't lock a zone * and SCHED_RESTART is set. 
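 *
 * In short: if nothing else is guaranteed to rerun this hctx for us, kick
 * it again right away; if the stall was resource related while a restart
 * is already pending, rerun it after a short delay instead so an idle
 * queue cannot stall forever.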
*/ needs_restart = blk_mq_sched_needs_restart(hctx); if (prep == PREP_DISPATCH_NO_BUDGET) needs_resource = true; if (!needs_restart || (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) blk_mq_run_hw_queue(hctx, true); else if (needs_resource) blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); blk_mq_update_dispatch_busy(hctx, true); return false; } blk_mq_update_dispatch_busy(hctx, false); return true; } static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) { int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); if (cpu >= nr_cpu_ids) cpu = cpumask_first(hctx->cpumask); return cpu; } /* * It'd be great if the workqueue API had a way to pass * in a mask and had some smarts for more clever placement. * For now we just round-robin here, switching for every * BLK_MQ_CPU_WORK_BATCH queued items. */ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) { bool tried = false; int next_cpu = hctx->next_cpu; if (hctx->queue->nr_hw_queues == 1) return WORK_CPU_UNBOUND; if (--hctx->next_cpu_batch <= 0) { select_cpu: next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = blk_mq_first_mapped_cpu(hctx); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } /* * Do unbound schedule if we can't find a online CPU for this hctx, * and it should only happen in the path of handling CPU DEAD. */ if (!cpu_online(next_cpu)) { if (!tried) { tried = true; goto select_cpu; } /* * Make sure to re-select CPU next time once after CPUs * in hctx->cpumask become online again. */ hctx->next_cpu = next_cpu; hctx->next_cpu_batch = 1; return WORK_CPU_UNBOUND; } hctx->next_cpu = next_cpu; return next_cpu; } /** * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously. * @hctx: Pointer to the hardware queue to run. * @msecs: Milliseconds of delay to wait before running the queue. * * Run a hardware queue asynchronously with a delay of @msecs. */ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) { if (unlikely(blk_mq_hctx_stopped(hctx))) return; kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_run_hw_queue); /** * blk_mq_run_hw_queue - Start to run a hardware queue. * @hctx: Pointer to the hardware queue to run. * @async: If we want to run the queue asynchronously. * * Check if the request queue is not in a quiesced state and if there are * pending requests to be sent. If this is true, run the queue to send requests * to hardware. */ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { bool need_run; /* * We can't run the queue inline with interrupts disabled. */ WARN_ON_ONCE(!async && in_interrupt()); might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING); /* * When queue is quiesced, we may be switching io scheduler, or * updating nr_hw_queues, or other things, and we can't run queue * any more, even __blk_mq_hctx_has_pending() can't be called safely. * * And queue will be rerun in blk_mq_unquiesce_queue() if it is * quiesced. */ __blk_mq_run_dispatch_ops(hctx->queue, false, need_run = !blk_queue_quiesced(hctx->queue) && blk_mq_hctx_has_pending(hctx)); if (!need_run) return; if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) { blk_mq_delay_run_hw_queue(hctx, 0); return; } blk_mq_run_dispatch_ops(hctx->queue, blk_mq_sched_dispatch_requests(hctx)); } EXPORT_SYMBOL(blk_mq_run_hw_queue); /* * Return prefered queue to dispatch from (if any) for non-mq aware IO * scheduler. 
*/ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) { struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); /* * If the IO scheduler does not respect hardware queues when * dispatching, we just don't bother with multiple HW queues and * dispatch from hctx for the current CPU since running multiple queues * just causes lock contention inside the scheduler and pointless cache * bouncing. */ struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT]; if (!blk_mq_hctx_stopped(hctx)) return hctx; return NULL; } /** * blk_mq_run_hw_queues - Run all hardware queues in a request queue. * @q: Pointer to the request queue to run. * @async: If we want to run the queue asynchronously. */ void blk_mq_run_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx, *sq_hctx; unsigned long i; sq_hctx = NULL; if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; /* * Dispatch from this hctx either if there's no hctx preferred * by IO scheduler or if it has requests that bypass the * scheduler. */ if (!sq_hctx || sq_hctx == hctx || !list_empty_careful(&hctx->dispatch)) blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_run_hw_queues); /** * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously. * @q: Pointer to the request queue to run. * @msecs: Milliseconds of delay to wait before running the queues. */ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) { struct blk_mq_hw_ctx *hctx, *sq_hctx; unsigned long i; sq_hctx = NULL; if (blk_queue_sq_sched(q)) sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; /* * If there is already a run_work pending, leave the * pending delay untouched. Otherwise, a hctx can stall * if another hctx is re-delaying the other's work * before the work executes. */ if (delayed_work_pending(&hctx->run_work)) continue; /* * Dispatch from this hctx either if there's no hctx preferred * by IO scheduler or if it has requests that bypass the * scheduler. */ if (!sq_hctx || sq_hctx == hctx || !list_empty_careful(&hctx->dispatch)) blk_mq_delay_run_hw_queue(hctx, msecs); } } EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queue() returns. Please use * blk_mq_quiesce_queue() for that requirement. */ void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) { cancel_delayed_work(&hctx->run_work); set_bit(BLK_MQ_S_STOPPED, &hctx->state); } EXPORT_SYMBOL(blk_mq_stop_hw_queue); /* * This function is often used for pausing .queue_rq() by driver when * there isn't enough resource or some conditions aren't satisfied, and * BLK_STS_RESOURCE is usually returned. * * We do not guarantee that dispatch can be drained or blocked * after blk_mq_stop_hw_queues() returns. Please use * blk_mq_quiesce_queue() for that requirement. 
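 *
 * Illustrative sketch (not from this file) of the typical pairing: a
 * driver that runs out of hardware slots can pause dispatch from
 * ->queue_rq() and restart it from its completion path:
 *
 *	blk_mq_stop_hw_queue(hctx);
 *	return BLK_STS_RESOURCE;
 *
 * and later, once slots have been freed:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);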
*/ void blk_mq_stop_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_stop_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_stop_hw_queues); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) { clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING); } EXPORT_SYMBOL(blk_mq_start_hw_queue); void blk_mq_start_hw_queues(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_hw_queue(hctx); } EXPORT_SYMBOL(blk_mq_start_hw_queues); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) { if (!blk_mq_hctx_stopped(hctx)) return; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); blk_mq_run_hw_queue(hctx, async); } EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_start_stopped_hw_queue(hctx, async || (hctx->flags & BLK_MQ_F_BLOCKING)); } EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues); static void blk_mq_run_work_fn(struct work_struct *work) { struct blk_mq_hw_ctx *hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); blk_mq_run_dispatch_ops(hctx->queue, blk_mq_sched_dispatch_requests(hctx)); } /** * blk_mq_request_bypass_insert - Insert a request at dispatch list. * @rq: Pointer to request to be inserted. * @flags: BLK_MQ_INSERT_* * * Should only be used carefully, when the caller knows we want to * bypass a potential IO scheduler on the target device. */ static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; spin_lock(&hctx->lock); if (flags & BLK_MQ_INSERT_AT_HEAD) list_add(&rq->queuelist, &hctx->dispatch); else list_add_tail(&rq->queuelist, &hctx->dispatch); spin_unlock(&hctx->lock); } static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list, bool run_queue_async) { struct request *rq; enum hctx_type type = hctx->type; /* * Try to issue requests directly if the hw queue isn't busy to save an * extra enqueue & dequeue to the sw queue. */ if (!hctx->dispatch_busy && !run_queue_async) { blk_mq_run_dispatch_ops(hctx->queue, blk_mq_try_issue_list_directly(hctx, list)); if (list_empty(list)) goto out; } /* * preemption doesn't flush plug list, so it's possible ctx->cpu is * offline now */ list_for_each_entry(rq, list, queuelist) { BUG_ON(rq->mq_ctx != ctx); trace_block_rq_insert(rq); if (rq->cmd_flags & REQ_NOWAIT) run_queue_async = true; } spin_lock(&ctx->lock); list_splice_tail_init(list, &ctx->rq_lists[type]); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); out: blk_mq_run_hw_queue(hctx, run_queue_async); } static void blk_mq_insert_request(struct request *rq, blk_insert_t flags) { struct request_queue *q = rq->q; struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if (blk_rq_is_passthrough(rq)) { /* * Passthrough request have to be added to hctx->dispatch * directly. The device may be in a situation where it can't * handle FS request, and always returns BLK_STS_RESOURCE for * them, which gets them added to hctx->dispatch. * * If a passthrough request is required to unblock the queues, * and it is added to the scheduler queue, there is no chance to * dispatch it given we prioritize requests in hctx->dispatch. 
*/ blk_mq_request_bypass_insert(rq, flags); } else if (req_op(rq) == REQ_OP_FLUSH) { /* * Firstly normal IO request is inserted to scheduler queue or * sw queue, meantime we add flush request to dispatch queue( * hctx->dispatch) directly and there is at most one in-flight * flush request for each hw queue, so it doesn't matter to add * flush request to tail or front of the dispatch queue. * * Secondly in case of NCQ, flush request belongs to non-NCQ * command, and queueing it will fail when there is any * in-flight normal IO request(NCQ command). When adding flush * rq to the front of hctx->dispatch, it is easier to introduce * extra time to flush rq's latency because of S_SCHED_RESTART * compared with adding to the tail of dispatch queue, then * chance of flush merge is increased, and less flush requests * will be issued to controller. It is observed that ~10% time * is saved in blktests block/004 on disk attached to AHCI/NCQ * drive when adding flush rq to the front of hctx->dispatch. * * Simply queue flush rq to the front of hctx->dispatch so that * intensive flush workloads can benefit in case of NCQ HW. */ blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD); } else if (q->elevator) { LIST_HEAD(list); WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG); list_add(&rq->queuelist, &list); q->elevator->type->ops.insert_requests(hctx, &list, flags); } else { trace_block_rq_insert(rq); spin_lock(&ctx->lock); if (flags & BLK_MQ_INSERT_AT_HEAD) list_add(&rq->queuelist, &ctx->rq_lists[hctx->type]); else list_add_tail(&rq->queuelist, &ctx->rq_lists[hctx->type]); blk_mq_hctx_mark_pending(hctx, ctx); spin_unlock(&ctx->lock); } } static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, unsigned int nr_segs) { int err; if (bio->bi_opf & REQ_RAHEAD) rq->cmd_flags |= REQ_FAILFAST_MASK; rq->__sector = bio->bi_iter.bi_sector; blk_rq_bio_prep(rq, bio, nr_segs); /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */ err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); WARN_ON_ONCE(err); blk_account_io_start(rq); } static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, .last = last, }; blk_status_t ret; /* * For OK queue, we are done. For error, caller may kill it. * Any other error (busy), just add it to our list as we * previously would have done. */ ret = q->mq_ops->queue_rq(hctx, &bd); switch (ret) { case BLK_STS_OK: blk_mq_update_dispatch_busy(hctx, false); break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_update_dispatch_busy(hctx, true); __blk_mq_requeue_request(rq); break; default: blk_mq_update_dispatch_busy(hctx, false); break; } return ret; } static bool blk_mq_get_budget_and_tag(struct request *rq) { int budget_token; budget_token = blk_mq_get_dispatch_budget(rq->q); if (budget_token < 0) return false; blk_mq_set_rq_budget_token(rq, budget_token); if (!blk_mq_get_driver_tag(rq)) { blk_mq_put_dispatch_budget(rq->q, budget_token); return false; } return true; } /** * blk_mq_try_issue_directly - Try to send a request directly to device driver. * @hctx: Pointer of the associated hardware queue. * @rq: Pointer to request to be sent. * * If the device has enough resources to accept a new request now, send the * request directly to device driver. Else, insert at hctx->dispatch queue, so * we can try send it another time in the future. Requests inserted at this * queue have higher priority. 
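 *
 * If ->queue_rq() answers BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE, the
 * request is parked on hctx->dispatch and the hardware queue is re-run,
 * so the caller never sees a busy status from this helper.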
*/ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq) { blk_status_t ret; if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { blk_mq_insert_request(rq, 0); return; } if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT); return; } ret = __blk_mq_issue_directly(hctx, rq, true); switch (ret) { case BLK_STS_OK: break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); blk_mq_run_hw_queue(hctx, false); break; default: blk_mq_end_request(rq, ret); break; } } static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) { blk_mq_insert_request(rq, 0); return BLK_STS_OK; } if (!blk_mq_get_budget_and_tag(rq)) return BLK_STS_RESOURCE; return __blk_mq_issue_directly(hctx, rq, last); } static void blk_mq_plug_issue_direct(struct blk_plug *plug) { struct blk_mq_hw_ctx *hctx = NULL; struct request *rq; int queued = 0; blk_status_t ret = BLK_STS_OK; while ((rq = rq_list_pop(&plug->mq_list))) { bool last = rq_list_empty(plug->mq_list); if (hctx != rq->mq_hctx) { if (hctx) { blk_mq_commit_rqs(hctx, queued, false); queued = 0; } hctx = rq->mq_hctx; } ret = blk_mq_request_issue_directly(rq, last); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); break; } } out: if (ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); } static void __blk_mq_flush_plug_list(struct request_queue *q, struct blk_plug *plug) { if (blk_queue_quiesced(q)) return; q->mq_ops->queue_rqs(&plug->mq_list); } static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) { struct blk_mq_hw_ctx *this_hctx = NULL; struct blk_mq_ctx *this_ctx = NULL; struct request *requeue_list = NULL; struct request **requeue_lastp = &requeue_list; unsigned int depth = 0; bool is_passthrough = false; LIST_HEAD(list); do { struct request *rq = rq_list_pop(&plug->mq_list); if (!this_hctx) { this_hctx = rq->mq_hctx; this_ctx = rq->mq_ctx; is_passthrough = blk_rq_is_passthrough(rq); } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx || is_passthrough != blk_rq_is_passthrough(rq)) { rq_list_add_tail(&requeue_lastp, rq); continue; } list_add(&rq->queuelist, &list); depth++; } while (!rq_list_empty(plug->mq_list)); plug->mq_list = requeue_list; trace_block_unplug(this_hctx->queue, depth, !from_sched); percpu_ref_get(&this_hctx->queue->q_usage_counter); /* passthrough requests should never be issued to the I/O scheduler */ if (is_passthrough) { spin_lock(&this_hctx->lock); list_splice_tail_init(&list, &this_hctx->dispatch); spin_unlock(&this_hctx->lock); blk_mq_run_hw_queue(this_hctx, from_sched); } else if (this_hctx->queue->elevator) { this_hctx->queue->elevator->type->ops.insert_requests(this_hctx, &list, 0); blk_mq_run_hw_queue(this_hctx, from_sched); } else { blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched); } percpu_ref_put(&this_hctx->queue->q_usage_counter); } void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request *rq; /* * We may have been called recursively midway through handling * plug->mq_list via a schedule() in the driver's queue_rq() callback. 
* To avoid mq_list changing under our feet, clear rq_count early and * bail out specifically if rq_count is 0 rather than checking * whether the mq_list is empty. */ if (plug->rq_count == 0) return; plug->rq_count = 0; if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) { struct request_queue *q; rq = rq_list_peek(&plug->mq_list); q = rq->q; /* * Peek first request and see if we have a ->queue_rqs() hook. * If we do, we can dispatch the whole plug list in one go. We * already know at this point that all requests belong to the * same queue, caller must ensure that's the case. */ if (q->mq_ops->queue_rqs) { blk_mq_run_dispatch_ops(q, __blk_mq_flush_plug_list(q, plug)); if (rq_list_empty(plug->mq_list)) return; } blk_mq_run_dispatch_ops(q, blk_mq_plug_issue_direct(plug)); if (rq_list_empty(plug->mq_list)) return; } do { blk_mq_dispatch_plug_list(plug, from_schedule); } while (!rq_list_empty(plug->mq_list)); } static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) { int queued = 0; blk_status_t ret = BLK_STS_OK; while (!list_empty(list)) { struct request *rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); ret = blk_mq_request_issue_directly(rq, list_empty(list)); switch (ret) { case BLK_STS_OK: queued++; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: blk_mq_request_bypass_insert(rq, 0); if (list_empty(list)) blk_mq_run_hw_queue(hctx, false); goto out; default: blk_mq_end_request(rq, ret); break; } } out: if (ret != BLK_STS_OK) blk_mq_commit_rqs(hctx, queued, false); } static bool blk_mq_attempt_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { if (!blk_queue_nomerges(q) && bio_mergeable(bio)) { if (blk_attempt_plug_merge(q, bio, nr_segs)) return true; if (blk_mq_sched_bio_merge(q, bio, nr_segs)) return true; } return false; } static struct request *blk_mq_get_new_requests(struct request_queue *q, struct blk_plug *plug, struct bio *bio, unsigned int nsegs) { struct blk_mq_alloc_data data = { .q = q, .nr_tags = 1, .cmd_flags = bio->bi_opf, }; struct request *rq; if (blk_mq_attempt_bio_merge(q, bio, nsegs)) return NULL; rq_qos_throttle(q, bio); if (plug) { data.nr_tags = plug->nr_ios; plug->nr_ios = 1; data.cached_rq = &plug->cached_rq; } rq = __blk_mq_alloc_requests(&data); if (rq) return rq; rq_qos_cleanup(q, bio); if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); return NULL; } /* * Check if we can use the passed on request for submitting the passed in bio, * and remove it from the request list if it can be used. */ static bool blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, struct bio *bio) { enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf); enum hctx_type hctx_type = rq->mq_hctx->type; WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); if (type != hctx_type && !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) return false; if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf)) return false; /* * If any qos ->throttle() end up blocking, we will have flushed the * plug and hence killed the cached_rq list as well. Pop this entry * before we throttle. */ plug->cached_rq = rq_list_next(rq); rq_qos_throttle(rq->q, bio); blk_mq_rq_time_init(rq, 0); rq->cmd_flags = bio->bi_opf; INIT_LIST_HEAD(&rq->queuelist); return true; } /** * blk_mq_submit_bio - Create and send a request to block device. * @bio: Bio pointer. * * Builds up a request structure from @q and @bio and send to the device. 
The * request may not be queued directly to hardware if: * * This request can be merged with another one * * We want to place request at plug queue for possible future merging * * There is an IO scheduler active at this queue * * It will not queue the request if there is an error with the bio, or at the * request creation. */ void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blk_plug *plug = blk_mq_plug(bio); const int is_sync = op_is_sync(bio->bi_opf); struct blk_mq_hw_ctx *hctx; struct request *rq = NULL; unsigned int nr_segs = 1; blk_status_t ret; bio = blk_queue_bounce(bio, q); if (plug) { rq = rq_list_peek(&plug->cached_rq); if (rq && rq->q != q) rq = NULL; } if (rq) { if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); if (!bio) return; } if (!bio_integrity_prep(bio)) return; if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) return; if (blk_mq_use_cached_rq(rq, plug, bio)) goto done; percpu_ref_get(&q->q_usage_counter); } else { if (unlikely(bio_queue_enter(bio))) return; if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); if (!bio) goto fail; } if (!bio_integrity_prep(bio)) goto fail; } rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); if (unlikely(!rq)) { fail: blk_queue_exit(q); return; } done: trace_block_getrq(bio); rq_qos_track(q, rq, bio); blk_mq_bio_to_request(rq, bio, nr_segs); ret = blk_crypto_rq_get_keyslot(rq); if (ret != BLK_STS_OK) { bio->bi_status = ret; bio_endio(bio); blk_mq_free_request(rq); return; } if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) return; if (plug) { blk_add_rq_to_plug(plug, rq); return; } hctx = rq->mq_hctx; if ((rq->rq_flags & RQF_USE_SCHED) || (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) { blk_mq_insert_request(rq, 0); blk_mq_run_hw_queue(hctx, true); } else { blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq)); } } #ifdef CONFIG_BLK_MQ_STACKING /** * blk_insert_cloned_request - Helper for stacking drivers to submit a request * @rq: the request being queued */ blk_status_t blk_insert_cloned_request(struct request *rq) { struct request_queue *q = rq->q; unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); unsigned int max_segments = blk_rq_get_max_segments(rq); blk_status_t ret; if (blk_rq_sectors(rq) > max_sectors) { /* * SCSI device does not have a good way to return if * Write Same/Zero is actually supported. If a device rejects * a non-read/write command (discard, write same,etc.) the * low-level device driver will set the relevant queue limit to * 0 to prevent blk-lib from issuing more of the offending * operations. Commands queued prior to the queue limit being * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O * errors being propagated to upper layers. */ if (max_sectors == 0) return BLK_STS_NOTSUPP; printk(KERN_ERR "%s: over max size limit. (%u > %u)\n", __func__, blk_rq_sectors(rq), max_sectors); return BLK_STS_IOERR; } /* * The queue settings related to segment counting may differ from the * original queue. */ rq->nr_phys_segments = blk_recalc_rq_segments(rq); if (rq->nr_phys_segments > max_segments) { printk(KERN_ERR "%s: over max segments limit. 
(%u > %u)\n", __func__, rq->nr_phys_segments, max_segments); return BLK_STS_IOERR; } if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq))) return BLK_STS_IOERR; ret = blk_crypto_rq_get_keyslot(rq); if (ret != BLK_STS_OK) return ret; blk_account_io_start(rq); /* * Since we have a scheduler attached on the top device, * bypass a potential scheduler on the bottom device for * insert. */ blk_mq_run_dispatch_ops(q, ret = blk_mq_request_issue_directly(rq, true)); if (ret) blk_account_io_done(rq, ktime_get_ns()); return ret; } EXPORT_SYMBOL_GPL(blk_insert_cloned_request); /** * blk_rq_unprep_clone - Helper function to free all bios in a cloned request * @rq: the clone request to be cleaned up * * Description: * Free all bios in @rq for a cloned request. */ void blk_rq_unprep_clone(struct request *rq) { struct bio *bio; while ((bio = rq->bio) != NULL) { rq->bio = bio->bi_next; bio_put(bio); } } EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); /** * blk_rq_prep_clone - Helper function to setup clone request * @rq: the request to be setup * @rq_src: original request to be cloned * @bs: bio_set that bios for clone are allocated from * @gfp_mask: memory allocation mask for bio * @bio_ctr: setup function to be called for each clone bio. * Returns %0 for success, non %0 for failure. * @data: private data to be passed to @bio_ctr * * Description: * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. * Also, pages which the original bios are pointing to are not copied * and the cloned bios just point same pages. * So cloned bios must be completed before original bios, which means * the caller must complete @rq before @rq_src. */ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, struct bio_set *bs, gfp_t gfp_mask, int (*bio_ctr)(struct bio *, struct bio *, void *), void *data) { struct bio *bio, *bio_src; if (!bs) bs = &fs_bio_set; __rq_for_each_bio(bio_src, rq_src) { bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask, bs); if (!bio) goto free_and_out; if (bio_ctr && bio_ctr(bio, bio_src, data)) goto free_and_out; if (rq->bio) { rq->biotail->bi_next = bio; rq->biotail = bio; } else { rq->bio = rq->biotail = bio; } bio = NULL; } /* Copy attributes of the original request to the clone request. */ rq->__sector = blk_rq_pos(rq_src); rq->__data_len = blk_rq_bytes(rq_src); if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) { rq->rq_flags |= RQF_SPECIAL_PAYLOAD; rq->special_vec = rq_src->special_vec; } rq->nr_phys_segments = rq_src->nr_phys_segments; rq->ioprio = rq_src->ioprio; if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) goto free_and_out; return 0; free_and_out: if (bio) bio_put(bio); blk_rq_unprep_clone(rq); return -ENOMEM; } EXPORT_SYMBOL_GPL(blk_rq_prep_clone); #endif /* CONFIG_BLK_MQ_STACKING */ /* * Steal bios from a request and add them to a bio list. * The request must not have been partially completed before. 
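 * Callers such as NVMe multipath failover use this to move the bios of
 * a failed request onto a requeue list so they can be resubmitted
 * elsewhere (an illustrative example, not the only user).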
*/ void blk_steal_bios(struct bio_list *list, struct request *rq) { if (rq->bio) { if (list->tail) list->tail->bi_next = rq->bio; else list->head = rq->bio; list->tail = rq->biotail; rq->bio = NULL; rq->biotail = NULL; } rq->__data_len = 0; } EXPORT_SYMBOL_GPL(blk_steal_bios); static size_t order_to_size(unsigned int order) { return (size_t)PAGE_SIZE << order; } /* called before freeing request pool in @tags */ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct blk_mq_tags *tags) { struct page *page; unsigned long flags; /* * There is no need to clear mapping if driver tags is not initialized * or the mapping belongs to the driver tags. */ if (!drv_tags || drv_tags == tags) return; list_for_each_entry(page, &tags->page_list, lru) { unsigned long start = (unsigned long)page_address(page); unsigned long end = start + order_to_size(page->private); int i; for (i = 0; i < drv_tags->nr_tags; i++) { struct request *rq = drv_tags->rqs[i]; unsigned long rq_addr = (unsigned long)rq; if (rq_addr >= start && rq_addr < end) { WARN_ON_ONCE(req_ref_read(rq) != 0); cmpxchg(&drv_tags->rqs[i], rq, NULL); } } } /* * Wait until all pending iteration is done. * * Request reference is cleared and it is guaranteed to be observed * after the ->lock is released. */ spin_lock_irqsave(&drv_tags->lock, flags); spin_unlock_irqrestore(&drv_tags->lock, flags); } void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { struct blk_mq_tags *drv_tags; struct page *page; if (list_empty(&tags->page_list)) return; if (blk_mq_is_shared_tags(set->flags)) drv_tags = set->shared_tags; else drv_tags = set->tags[hctx_idx]; if (tags->static_rqs && set->ops->exit_request) { int i; for (i = 0; i < tags->nr_tags; i++) { struct request *rq = tags->static_rqs[i]; if (!rq) continue; set->ops->exit_request(set, rq, hctx_idx); tags->static_rqs[i] = NULL; } } blk_mq_clear_rq_mapping(drv_tags, tags); while (!list_empty(&tags->page_list)) { page = list_first_entry(&tags->page_list, struct page, lru); list_del_init(&page->lru); /* * Remove kmemleak object previously allocated in * blk_mq_alloc_rqs(). 
*/ kmemleak_free(page_address(page)); __free_pages(page, page->private); } } void blk_mq_free_rq_map(struct blk_mq_tags *tags) { kfree(tags->rqs); tags->rqs = NULL; kfree(tags->static_rqs); tags->static_rqs = NULL; blk_mq_free_tags(tags); } static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set, unsigned int hctx_idx) { int i; for (i = 0; i < set->nr_maps; i++) { unsigned int start = set->map[i].queue_offset; unsigned int end = start + set->map[i].nr_queues; if (hctx_idx >= start && hctx_idx < end) break; } if (i >= set->nr_maps) i = HCTX_TYPE_DEFAULT; return i; } static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set, unsigned int hctx_idx) { enum hctx_type type = hctx_idx_to_type(set, hctx_idx); return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx); } static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, unsigned int reserved_tags) { int node = blk_mq_get_hctx_node(set, hctx_idx); struct blk_mq_tags *tags; if (node == NUMA_NO_NODE) node = set->numa_node; tags = blk_mq_init_tags(nr_tags, reserved_tags, node, BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) goto err_free_tags; tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->static_rqs) goto err_free_rqs; return tags; err_free_rqs: kfree(tags->rqs); err_free_tags: blk_mq_free_tags(tags); return NULL; } static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, int node) { int ret; if (set->ops->init_request) { ret = set->ops->init_request(set, rq, hctx_idx, node); if (ret) return ret; } WRITE_ONCE(rq->state, MQ_RQ_IDLE); return 0; } static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx, unsigned int depth) { unsigned int i, j, entries_per_page, max_order = 4; int node = blk_mq_get_hctx_node(set, hctx_idx); size_t rq_size, left; if (node == NUMA_NO_NODE) node = set->numa_node; INIT_LIST_HEAD(&tags->page_list); /* * rq_size is the size of the request plus driver payload, rounded * to the cacheline size */ rq_size = round_up(sizeof(struct request) + set->cmd_size, cache_line_size()); left = rq_size * depth; for (i = 0; i < depth; ) { int this_order = max_order; struct page *page; int to_do; void *p; while (this_order && left < order_to_size(this_order - 1)) this_order--; do { page = alloc_pages_node(node, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO, this_order); if (page) break; if (!this_order--) break; if (order_to_size(this_order) < rq_size) break; } while (1); if (!page) goto fail; page->private = this_order; list_add_tail(&page->lru, &tags->page_list); p = page_address(page); /* * Allow kmemleak to scan these pages as they contain pointers * to additional allocations like via ops->init_request(). 
*/ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO); entries_per_page = order_to_size(this_order) / rq_size; to_do = min(entries_per_page, depth - i); left -= to_do * rq_size; for (j = 0; j < to_do; j++) { struct request *rq = p; tags->static_rqs[i] = rq; if (blk_mq_init_request(set, rq, hctx_idx, node)) { tags->static_rqs[i] = NULL; goto fail; } p += rq_size; i++; } } return 0; fail: blk_mq_free_rqs(set, tags, hctx_idx); return -ENOMEM; } struct rq_iter_data { struct blk_mq_hw_ctx *hctx; bool has_rq; }; static bool blk_mq_has_request(struct request *rq, void *data) { struct rq_iter_data *iter_data = data; if (rq->mq_hctx != iter_data->hctx) return true; iter_data->has_rq = true; return false; } static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->sched_tags ? hctx->sched_tags : hctx->tags; struct rq_iter_data data = { .hctx = hctx, }; blk_mq_all_tag_iter(tags, blk_mq_has_request, &data); return data.has_rq; } static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu, struct blk_mq_hw_ctx *hctx) { if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu) return false; if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) return false; return true; } static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_online); if (!cpumask_test_cpu(cpu, hctx->cpumask) || !blk_mq_last_cpu_in_hctx(cpu, hctx)) return 0; /* * Prevent new request from being allocated on the current hctx. * * The smp_mb__after_atomic() Pairs with the implied barrier in * test_and_set_bit_lock in sbitmap_get(). Ensures the inactive flag is * seen once we return from the tag allocator. */ set_bit(BLK_MQ_S_INACTIVE, &hctx->state); smp_mb__after_atomic(); /* * Try to grab a reference to the queue and wait for any outstanding * requests. If we could not grab a reference the queue has been * frozen and there are no requests. */ if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { while (blk_mq_hctx_has_requests(hctx)) msleep(5); percpu_ref_put(&hctx->queue->q_usage_counter); } return 0; } static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_online); if (cpumask_test_cpu(cpu, hctx->cpumask)) clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); return 0; } /* * 'cpu' is going away. splice any existing rq_list entries from this * software queue to the hw queue dispatch list, and ensure that it * gets run. 
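 * This is the CPUHP_BLK_MQ_DEAD callback registered in blk_mq_init()
 * below; draining of BLK_MQ_S_INACTIVE hctxs is handled separately by
 * the CPUHP_AP_BLK_MQ_ONLINE online/offline pair above.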
*/ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node) { struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; LIST_HEAD(tmp); enum hctx_type type; hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); if (!cpumask_test_cpu(cpu, hctx->cpumask)) return 0; ctx = __blk_mq_get_ctx(hctx->queue, cpu); type = hctx->type; spin_lock(&ctx->lock); if (!list_empty(&ctx->rq_lists[type])) { list_splice_init(&ctx->rq_lists[type], &tmp); blk_mq_hctx_clear_pending(hctx, ctx); } spin_unlock(&ctx->lock); if (list_empty(&tmp)) return 0; spin_lock(&hctx->lock); list_splice_tail_init(&tmp, &hctx->dispatch); spin_unlock(&hctx->lock); blk_mq_run_hw_queue(hctx, true); return 0; } static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) { if (!(hctx->flags & BLK_MQ_F_STACKING)) cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, &hctx->cpuhp_online); cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); } /* * Before freeing hw queue, clearing the flush request reference in * tags->rqs[] for avoiding potential UAF. */ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags, unsigned int queue_depth, struct request *flush_rq) { int i; unsigned long flags; /* The hw queue may not be mapped yet */ if (!tags) return; WARN_ON_ONCE(req_ref_read(flush_rq) != 0); for (i = 0; i < queue_depth; i++) cmpxchg(&tags->rqs[i], flush_rq, NULL); /* * Wait until all pending iteration is done. * * Request reference is cleared and it is guaranteed to be observed * after the ->lock is released. */ spin_lock_irqsave(&tags->lock, flags); spin_unlock_irqrestore(&tags->lock, flags); } /* hctx->ctxs will be freed in queue's release handler */ static void blk_mq_exit_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { struct request *flush_rq = hctx->fq->flush_rq; if (blk_mq_hw_queue_mapped(hctx)) blk_mq_tag_idle(hctx); if (blk_queue_init_done(q)) blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx], set->queue_depth, flush_rq); if (set->ops->exit_request) set->ops->exit_request(set, flush_rq, hctx_idx); if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); blk_mq_remove_cpuhp(hctx); xa_erase(&q->hctx_table, hctx_idx); spin_lock(&q->unused_hctx_lock); list_add(&hctx->hctx_list, &q->unused_hctx_list); spin_unlock(&q->unused_hctx_lock); } static void blk_mq_exit_hw_queues(struct request_queue *q, struct blk_mq_tag_set *set, int nr_queue) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) { if (i == nr_queue) break; blk_mq_exit_hctx(q, set, hctx, i); } } static int blk_mq_init_hctx(struct request_queue *q, struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) { hctx->queue_num = hctx_idx; if (!(hctx->flags & BLK_MQ_F_STACKING)) cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, &hctx->cpuhp_online); cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); hctx->tags = set->tags[hctx_idx]; if (set->ops->init_hctx && set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) goto unregister_cpu_notifier; if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx->numa_node)) goto exit_hctx; if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) goto exit_flush_rq; return 0; exit_flush_rq: if (set->ops->exit_request) set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); unregister_cpu_notifier: blk_mq_remove_cpuhp(hctx); return -1; } static struct 
blk_mq_hw_ctx * blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, int node) { struct blk_mq_hw_ctx *hctx; gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY; hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node); if (!hctx) goto fail_alloc_hctx; if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) goto free_hctx; atomic_set(&hctx->nr_active, 0); if (node == NUMA_NO_NODE) node = set->numa_node; hctx->numa_node = node; INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); spin_lock_init(&hctx->lock); INIT_LIST_HEAD(&hctx->dispatch); hctx->queue = q; hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; INIT_LIST_HEAD(&hctx->hctx_list); /* * Allocate space for all possible cpus to avoid allocation at * runtime */ hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), gfp, node); if (!hctx->ctxs) goto free_cpumask; if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), gfp, node, false, false)) goto free_ctxs; hctx->nr_ctx = 0; spin_lock_init(&hctx->dispatch_wait_lock); init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); INIT_LIST_HEAD(&hctx->dispatch_wait.entry); hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); if (!hctx->fq) goto free_bitmap; blk_mq_hctx_kobj_init(hctx); return hctx; free_bitmap: sbitmap_free(&hctx->ctx_map); free_ctxs: kfree(hctx->ctxs); free_cpumask: free_cpumask_var(hctx->cpumask); free_hctx: kfree(hctx); fail_alloc_hctx: return NULL; } static void blk_mq_init_cpu_queues(struct request_queue *q, unsigned int nr_hw_queues) { struct blk_mq_tag_set *set = q->tag_set; unsigned int i, j; for_each_possible_cpu(i) { struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); struct blk_mq_hw_ctx *hctx; int k; __ctx->cpu = i; spin_lock_init(&__ctx->lock); for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++) INIT_LIST_HEAD(&__ctx->rq_lists[k]); __ctx->queue = q; /* * Set local node, IFF we have more than one hw queue. 
If * not, we remain on the home node of the device */ for (j = 0; j < set->nr_maps; j++) { hctx = blk_mq_map_queue_type(q, j, i); if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) hctx->numa_node = cpu_to_node(i); } } } struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int depth) { struct blk_mq_tags *tags; int ret; tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); if (!tags) return NULL; ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); if (ret) { blk_mq_free_rq_map(tags); return NULL; } return tags; } static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { if (blk_mq_is_shared_tags(set->flags)) { set->tags[hctx_idx] = set->shared_tags; return true; } set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, set->queue_depth); return set->tags[hctx_idx]; } void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { if (tags) { blk_mq_free_rqs(set, tags, hctx_idx); blk_mq_free_rq_map(tags); } } static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx) { if (!blk_mq_is_shared_tags(set->flags)) blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); set->tags[hctx_idx] = NULL; } static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int j, hctx_idx; unsigned long i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; hctx->dispatch_from = NULL; } /* * Map software to hardware queues. * * If the cpu isn't present, the cpu is mapped to first hctx. */ for_each_possible_cpu(i) { ctx = per_cpu_ptr(q->queue_ctx, i); for (j = 0; j < set->nr_maps; j++) { if (!set->map[j].nr_queues) { ctx->hctxs[j] = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, i); continue; } hctx_idx = set->map[j].mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { /* * If tags initialization fail for some hctx, * that hctx won't be brought online. In this * case, remap the current ctx to hctx[0] which * is guaranteed to always have tags allocated */ set->map[j].mq_map[i] = 0; } hctx = blk_mq_map_queue_type(q, j, i); ctx->hctxs[j] = hctx; /* * If the CPU is already set in the mask, then we've * mapped this one already. This can happen if * devices share queues across queue maps. */ if (cpumask_test_cpu(i, hctx->cpumask)) continue; cpumask_set_cpu(i, hctx->cpumask); hctx->type = j; ctx->index_hw[hctx->type] = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; /* * If the nr_ctx type overflows, we have exceeded the * amount of sw queues we can support. */ BUG_ON(!hctx->nr_ctx); } for (; j < HCTX_MAX_TYPES; j++) ctx->hctxs[j] = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, i); } queue_for_each_hw_ctx(q, hctx, i) { /* * If no software queues are mapped to this hardware queue, * disable it and free the request entries. */ if (!hctx->nr_ctx) { /* Never unmap queue 0. We need it as a * fallback in case of a new remap fails * allocation */ if (i) __blk_mq_free_map_and_rqs(set, i); hctx->tags = NULL; continue; } hctx->tags = set->tags[i]; WARN_ON(!hctx->tags); /* * Set the map size to the number of mapped software queues. * This is more accurate and more efficient than looping * over all possibly mapped software queues. 
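 * (ctx_map was sized for all possible CPUs in blk_mq_alloc_hctx(), so
 * shrinking it to hctx->nr_ctx here keeps the pending-bit scan tight.)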
*/ sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); /* * Initialize batch roundrobin counts */ hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } } /* * Caller needs to ensure that we're either frozen/quiesced, or that * the queue isn't live yet. */ static void queue_set_hctx_shared(struct request_queue *q, bool shared) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) { if (shared) { hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; } else { blk_mq_tag_idle(hctx); hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; } } } static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set, bool shared) { struct request_queue *q; lockdep_assert_held(&set->tag_list_lock); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_freeze_queue(q); queue_set_hctx_shared(q, shared); blk_mq_unfreeze_queue(q); } } static void blk_mq_del_queue_tag_set(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); list_del(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_shared(set, false); } mutex_unlock(&set->tag_list_lock); INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, struct request_queue *q) { mutex_lock(&set->tag_list_lock); /* * Check to see if we're transitioning to shared (from 1 to 2 queues). */ if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; /* update existing queue */ blk_mq_update_tag_set_shared(set, true); } if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) queue_set_hctx_shared(q, true); list_add_tail(&q->tag_set_list, &set->tag_list); mutex_unlock(&set->tag_list_lock); } /* All allocations will be freed in release handler of q->mq_kobj */ static int blk_mq_alloc_ctxs(struct request_queue *q) { struct blk_mq_ctxs *ctxs; int cpu; ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); if (!ctxs) return -ENOMEM; ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx); if (!ctxs->queue_ctx) goto fail; for_each_possible_cpu(cpu) { struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); ctx->ctxs = ctxs; } q->mq_kobj = &ctxs->kobj; q->queue_ctx = ctxs->queue_ctx; return 0; fail: kfree(ctxs); return -ENOMEM; } /* * It is the actual release handler for mq, but we do it from * request queue's release handler for avoiding use-after-free * and headache because q->mq_kobj shouldn't have been introduced, * but we can't group ctx/kctx kobj without it. */ void blk_mq_release(struct request_queue *q) { struct blk_mq_hw_ctx *hctx, *next; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); /* all hctx are in .unused_hctx_list now */ list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { list_del_init(&hctx->hctx_list); kobject_put(&hctx->kobj); } xa_destroy(&q->hctx_table); /* * release .mq_kobj and sw queue's kobject now because * both share lifetime with request queue. 
*/ blk_mq_sysfs_deinit(q); } static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set, void *queuedata) { struct request_queue *q; int ret; q = blk_alloc_queue(set->numa_node); if (!q) return ERR_PTR(-ENOMEM); q->queuedata = queuedata; ret = blk_mq_init_allocated_queue(set, q); if (ret) { blk_put_queue(q); return ERR_PTR(ret); } return q; } struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { return blk_mq_init_queue_data(set, NULL); } EXPORT_SYMBOL(blk_mq_init_queue); /** * blk_mq_destroy_queue - shutdown a request queue * @q: request queue to shutdown * * This shuts down a request queue allocated by blk_mq_init_queue(). All future * requests will be failed with -ENODEV. The caller is responsible for dropping * the reference from blk_mq_init_queue() by calling blk_put_queue(). * * Context: can sleep */ void blk_mq_destroy_queue(struct request_queue *q) { WARN_ON_ONCE(!queue_is_mq(q)); WARN_ON_ONCE(blk_queue_registered(q)); might_sleep(); blk_queue_flag_set(QUEUE_FLAG_DYING, q); blk_queue_start_drain(q); blk_mq_freeze_queue_wait(q); blk_sync_queue(q); blk_mq_cancel_work_sync(q); blk_mq_exit_queue(q); } EXPORT_SYMBOL(blk_mq_destroy_queue); struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, struct lock_class_key *lkclass) { struct request_queue *q; struct gendisk *disk; q = blk_mq_init_queue_data(set, queuedata); if (IS_ERR(q)) return ERR_CAST(q); disk = __alloc_disk_node(q, set->numa_node, lkclass); if (!disk) { blk_mq_destroy_queue(q); blk_put_queue(q); return ERR_PTR(-ENOMEM); } set_bit(GD_OWNS_QUEUE, &disk->state); return disk; } EXPORT_SYMBOL(__blk_mq_alloc_disk); struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, struct lock_class_key *lkclass) { struct gendisk *disk; if (!blk_get_queue(q)) return NULL; disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass); if (!disk) blk_put_queue(q); return disk; } EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx( struct blk_mq_tag_set *set, struct request_queue *q, int hctx_idx, int node) { struct blk_mq_hw_ctx *hctx = NULL, *tmp; /* reuse dead hctx first */ spin_lock(&q->unused_hctx_lock); list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { if (tmp->numa_node == node) { hctx = tmp; break; } } if (hctx) list_del_init(&hctx->hctx_list); spin_unlock(&q->unused_hctx_lock); if (!hctx) hctx = blk_mq_alloc_hctx(q, set, node); if (!hctx) goto fail; if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) goto free_hctx; return hctx; free_hctx: kobject_put(&hctx->kobj); fail: return NULL; } static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i, j; /* protect against switching io scheduler */ mutex_lock(&q->sysfs_lock); for (i = 0; i < set->nr_hw_queues; i++) { int old_node; int node = blk_mq_get_hctx_node(set, i); struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i); if (old_hctx) { old_node = old_hctx->numa_node; blk_mq_exit_hctx(q, set, old_hctx, i); } if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) { if (!old_hctx) break; pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n", node, old_node); hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node); WARN_ON_ONCE(!hctx); } } /* * Increasing nr_hw_queues fails. Free the newly allocated * hctxs and keep the previous q->nr_hw_queues. 
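 *
 * The xa_for_each_start() below then removes every hctx at an index
 * beyond the count we keep: the partially allocated new ones on
 * failure, or the now-excess old ones when nr_hw_queues shrinks.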
*/ if (i != set->nr_hw_queues) { j = q->nr_hw_queues; } else { j = i; q->nr_hw_queues = set->nr_hw_queues; } xa_for_each_start(&q->hctx_table, j, hctx, j) blk_mq_exit_hctx(q, set, hctx, j); mutex_unlock(&q->sysfs_lock); } static void blk_mq_update_poll_flag(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; if (set->nr_maps > HCTX_TYPE_POLL && set->map[HCTX_TYPE_POLL].nr_queues) blk_queue_flag_set(QUEUE_FLAG_POLL, q); else blk_queue_flag_clear(QUEUE_FLAG_POLL, q); } int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q) { /* mark the queue as mq asap */ q->mq_ops = set->ops; if (blk_mq_alloc_ctxs(q)) goto err_exit; /* init q->mq_kobj and sw queues' kobjects */ blk_mq_sysfs_init(q); INIT_LIST_HEAD(&q->unused_hctx_list); spin_lock_init(&q->unused_hctx_lock); xa_init(&q->hctx_table); blk_mq_realloc_hw_ctxs(set, q); if (!q->nr_hw_queues) goto err_hctxs; INIT_WORK(&q->timeout_work, blk_mq_timeout_work); blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); q->tag_set = set; q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; blk_mq_update_poll_flag(q); INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); INIT_LIST_HEAD(&q->flush_list); INIT_LIST_HEAD(&q->requeue_list); spin_lock_init(&q->requeue_lock); q->nr_requests = set->queue_depth; blk_mq_init_cpu_queues(q, set->nr_hw_queues); blk_mq_add_queue_tag_set(set, q); blk_mq_map_swqueue(q); return 0; err_hctxs: blk_mq_release(q); err_exit: q->mq_ops = NULL; return -ENOMEM; } EXPORT_SYMBOL(blk_mq_init_allocated_queue); /* tags can _not_ be used after returning from blk_mq_exit_queue */ void blk_mq_exit_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */ blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */ blk_mq_del_queue_tag_set(q); } static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; if (blk_mq_is_shared_tags(set->flags)) { set->shared_tags = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, set->queue_depth); if (!set->shared_tags) return -ENOMEM; } for (i = 0; i < set->nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; cond_resched(); } return 0; out_unwind: while (--i >= 0) __blk_mq_free_map_and_rqs(set, i); if (blk_mq_is_shared_tags(set->flags)) { blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } return -ENOMEM; } /* * Allocate the request maps associated with this tag_set. Note that this * may reduce the depth asked for, if memory is tight. set->queue_depth * will be updated to reflect the allocated depth. */ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) { unsigned int depth; int err; depth = set->queue_depth; do { err = __blk_mq_alloc_rq_maps(set); if (!err) break; set->queue_depth >>= 1; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { err = -ENOMEM; break; } } while (set->queue_depth); if (!set->queue_depth || err) { pr_err("blk-mq: failed to allocate request map\n"); return -ENOMEM; } if (depth != set->queue_depth) pr_info("blk-mq: reduced tag depth (%u -> %u)\n", depth, set->queue_depth); return 0; } static void blk_mq_update_queue_map(struct blk_mq_tag_set *set) { /* * blk_mq_map_queues() and multiple .map_queues() implementations * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the * number of hardware queues. 
*/ if (set->nr_maps == 1) set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; if (set->ops->map_queues && !is_kdump_kernel()) { int i; /* * transport .map_queues is usually done in the following * way: * * for (queue = 0; queue < set->nr_hw_queues; queue++) { * mask = get_cpu_mask(queue) * for_each_cpu(cpu, mask) * set->map[x].mq_map[cpu] = queue; * } * * When we need to remap, the table has to be cleared for * killing stale mapping since one CPU may not be mapped * to any hw queue. */ for (i = 0; i < set->nr_maps; i++) blk_mq_clear_mq_map(&set->map[i]); set->ops->map_queues(set); } else { BUG_ON(set->nr_maps > 1); blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } } static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, int new_nr_hw_queues) { struct blk_mq_tags **new_tags; int i; if (set->nr_hw_queues >= new_nr_hw_queues) goto done; new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!new_tags) return -ENOMEM; if (set->tags) memcpy(new_tags, set->tags, set->nr_hw_queues * sizeof(*set->tags)); kfree(set->tags); set->tags = new_tags; for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) { while (--i >= set->nr_hw_queues) __blk_mq_free_map_and_rqs(set, i); return -ENOMEM; } cond_resched(); } done: set->nr_hw_queues = new_nr_hw_queues; return 0; } /* * Alloc a tag set to be associated with one or more request queues. * May fail with EINVAL for various error conditions. May adjust the * requested depth down, if it's too large. In that case, the set * value will be stored in set->queue_depth. */ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) { int i, ret; BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); if (!set->nr_hw_queues) return -EINVAL; if (!set->queue_depth) return -EINVAL; if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) return -EINVAL; if (!set->ops->queue_rq) return -EINVAL; if (!set->ops->get_budget ^ !set->ops->put_budget) return -EINVAL; if (set->queue_depth > BLK_MQ_MAX_DEPTH) { pr_info("blk-mq: reduced tag depth to %u\n", BLK_MQ_MAX_DEPTH); set->queue_depth = BLK_MQ_MAX_DEPTH; } if (!set->nr_maps) set->nr_maps = 1; else if (set->nr_maps > HCTX_MAX_TYPES) return -EINVAL; /* * If a crashdump is active, then we are potentially in a very * memory constrained environment. Limit us to 1 queue and * 64 tags to prevent using too much memory. */ if (is_kdump_kernel()) { set->nr_hw_queues = 1; set->nr_maps = 1; set->queue_depth = min(64U, set->queue_depth); } /* * There is no use for more h/w queues than cpus if we just have * a single map */ if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids) set->nr_hw_queues = nr_cpu_ids; if (set->flags & BLK_MQ_F_BLOCKING) { set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL); if (!set->srcu) return -ENOMEM; ret = init_srcu_struct(set->srcu); if (ret) goto out_free_srcu; } ret = -ENOMEM; set->tags = kcalloc_node(set->nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); if (!set->tags) goto out_cleanup_srcu; for (i = 0; i < set->nr_maps; i++) { set->map[i].mq_map = kcalloc_node(nr_cpu_ids, sizeof(set->map[i].mq_map[0]), GFP_KERNEL, set->numa_node); if (!set->map[i].mq_map) goto out_free_mq_map; set->map[i].nr_queues = is_kdump_kernel() ? 
1 : set->nr_hw_queues; } blk_mq_update_queue_map(set); ret = blk_mq_alloc_set_map_and_rqs(set); if (ret) goto out_free_mq_map; mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); set->map[i].mq_map = NULL; } kfree(set->tags); set->tags = NULL; out_cleanup_srcu: if (set->flags & BLK_MQ_F_BLOCKING) cleanup_srcu_struct(set->srcu); out_free_srcu: if (set->flags & BLK_MQ_F_BLOCKING) kfree(set->srcu); return ret; } EXPORT_SYMBOL(blk_mq_alloc_tag_set); /* allocate and initialize a tagset for a simple single-queue device */ int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set, const struct blk_mq_ops *ops, unsigned int queue_depth, unsigned int set_flags) { memset(set, 0, sizeof(*set)); set->ops = ops; set->nr_hw_queues = 1; set->nr_maps = 1; set->queue_depth = queue_depth; set->numa_node = NUMA_NO_NODE; set->flags = set_flags; return blk_mq_alloc_tag_set(set); } EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set); void blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i, j; for (i = 0; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); if (blk_mq_is_shared_tags(set->flags)) { blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } for (j = 0; j < set->nr_maps; j++) { kfree(set->map[j].mq_map); set->map[j].mq_map = NULL; } kfree(set->tags); set->tags = NULL; if (set->flags & BLK_MQ_F_BLOCKING) { cleanup_srcu_struct(set->srcu); kfree(set->srcu); } } EXPORT_SYMBOL(blk_mq_free_tag_set); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) { struct blk_mq_tag_set *set = q->tag_set; struct blk_mq_hw_ctx *hctx; int ret; unsigned long i; if (!set) return -EINVAL; if (q->nr_requests == nr) return 0; blk_mq_freeze_queue(q); blk_mq_quiesce_queue(q); ret = 0; queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->tags) continue; /* * If we're using an MQ scheduler, just update the scheduler * queue depth. This is similar to what the old code would do. */ if (hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); } else { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); } if (ret) break; if (q->elevator && q->elevator->type->ops.depth_updated) q->elevator->type->ops.depth_updated(hctx); } if (!ret) { q->nr_requests = nr; if (blk_mq_is_shared_tags(set->flags)) { if (q->elevator) blk_mq_tag_update_sched_shared_tags(q); else blk_mq_tag_resize_shared_tags(set, nr); } } blk_mq_unquiesce_queue(q); blk_mq_unfreeze_queue(q); return ret; } /* * request_queue and elevator_type pair. * It is just used by __blk_mq_update_nr_hw_queues to cache * the elevator_type associated with a request_queue. 
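 * The pair list itself lives on the caller's stack (LIST_HEAD(head) in
 * __blk_mq_update_nr_hw_queues()); entries are allocated in
 * blk_mq_elv_switch_none() and freed in blk_mq_elv_switch_back().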
*/ struct blk_mq_qe_pair { struct list_head node; struct request_queue *q; struct elevator_type *type; }; /* * Cache the elevator_type in qe pair list and switch the * io scheduler to 'none' */ static bool blk_mq_elv_switch_none(struct list_head *head, struct request_queue *q) { struct blk_mq_qe_pair *qe; qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY); if (!qe) return false; /* q->elevator needs protection from ->sysfs_lock */ mutex_lock(&q->sysfs_lock); /* the check has to be done with holding sysfs_lock */ if (!q->elevator) { kfree(qe); goto unlock; } INIT_LIST_HEAD(&qe->node); qe->q = q; qe->type = q->elevator->type; /* keep a reference to the elevator module as we'll switch back */ __elevator_get(qe->type); list_add(&qe->node, head); elevator_disable(q); unlock: mutex_unlock(&q->sysfs_lock); return true; } static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head, struct request_queue *q) { struct blk_mq_qe_pair *qe; list_for_each_entry(qe, head, node) if (qe->q == q) return qe; return NULL; } static void blk_mq_elv_switch_back(struct list_head *head, struct request_queue *q) { struct blk_mq_qe_pair *qe; struct elevator_type *t; qe = blk_lookup_qe_pair(head, q); if (!qe) return; t = qe->type; list_del(&qe->node); kfree(qe); mutex_lock(&q->sysfs_lock); elevator_switch(q, t); /* drop the reference acquired in blk_mq_elv_switch_none */ elevator_put(t); mutex_unlock(&q->sysfs_lock); } static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { struct request_queue *q; LIST_HEAD(head); int prev_nr_hw_queues = set->nr_hw_queues; int i; lockdep_assert_held(&set->tag_list_lock); if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids) nr_hw_queues = nr_cpu_ids; if (nr_hw_queues < 1) return; if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues) return; list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_freeze_queue(q); /* * Switch IO scheduler to 'none', cleaning up the data associated * with the previous scheduler. We will switch back once we are done * updating the new sw to hw queue mappings. */ list_for_each_entry(q, &set->tag_list, tag_set_list) if (!blk_mq_elv_switch_none(&head, q)) goto switch_back; list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_debugfs_unregister_hctxs(q); blk_mq_sysfs_unregister_hctxs(q); } if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) goto reregister; fallback: blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); blk_mq_update_poll_flag(q); if (q->nr_hw_queues != set->nr_hw_queues) { int i = prev_nr_hw_queues; pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", nr_hw_queues, prev_nr_hw_queues); for (; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); set->nr_hw_queues = prev_nr_hw_queues; goto fallback; } blk_mq_map_swqueue(q); } reregister: list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_sysfs_register_hctxs(q); blk_mq_debugfs_register_hctxs(q); } switch_back: list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_elv_switch_back(&head, q); list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue(q); /* Free the excess tags when nr_hw_queues shrink. 
*/ for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); } void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) { mutex_lock(&set->tag_list_lock); __blk_mq_update_nr_hw_queues(set, nr_hw_queues); mutex_unlock(&set->tag_list_lock); } EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags) { long state = get_current_state(); int ret; do { ret = q->mq_ops->poll(hctx, iob); if (ret > 0) { __set_current_state(TASK_RUNNING); return ret; } if (signal_pending_state(state, current)) __set_current_state(TASK_RUNNING); if (task_is_running(current)) return 1; if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); __set_current_state(TASK_RUNNING); return 0; } int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, unsigned int flags) { struct blk_mq_hw_ctx *hctx = xa_load(&q->hctx_table, cookie); return blk_hctx_poll(q, hctx, iob, flags); } int blk_rq_poll(struct request *rq, struct io_comp_batch *iob, unsigned int poll_flags) { struct request_queue *q = rq->q; int ret; if (!blk_rq_is_poll(rq)) return 0; if (!percpu_ref_tryget(&q->q_usage_counter)) return 0; ret = blk_hctx_poll(q, rq->mq_hctx, iob, poll_flags); blk_queue_exit(q); return ret; } EXPORT_SYMBOL_GPL(blk_rq_poll); unsigned int blk_mq_rq_cpu(struct request *rq) { return rq->mq_ctx->cpu; } EXPORT_SYMBOL(blk_mq_rq_cpu); void blk_mq_cancel_work_sync(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; cancel_delayed_work_sync(&q->requeue_work); queue_for_each_hw_ctx(q, hctx, i) cancel_delayed_work_sync(&hctx->run_work); } static int __init blk_mq_init(void) { int i; for_each_possible_cpu(i) init_llist_head(&per_cpu(blk_cpu_done, i)); for_each_possible_cpu(i) INIT_CSD(&per_cpu(blk_cpu_csd, i), __blk_mq_complete_request_remote, NULL); open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD, "block/softirq:dead", NULL, blk_softirq_cpu_dead); cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, blk_mq_hctx_notify_dead); cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online", blk_mq_hctx_notify_online, blk_mq_hctx_notify_offline); return 0; } subsys_initcall(blk_mq_init); |
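
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical and
 * not part of blk-mq): a minimal single-queue driver built on the
 * helpers exported above typically does
 *
 *	static struct blk_mq_tag_set foo_tag_set;
 *
 *	err = blk_mq_alloc_sq_tag_set(&foo_tag_set, &foo_mq_ops, 64, 0);
 *	if (err)
 *		goto out;
 *	disk = blk_mq_alloc_disk(&foo_tag_set, foo_private_data);
 *	if (IS_ERR(disk))
 *		goto out_free_tag_set;
 *	... set capacity, add_disk(disk), submit I/O ...
 *
 * with teardown in reverse order: del_gendisk(), put_disk() and finally
 * blk_mq_free_tag_set(). foo_mq_ops must provide at least ->queue_rq();
 * blk_mq_alloc_tag_set() rejects a tag set without it.
 */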
/* * Copyright (c) 2017 Mellanox Technologies. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/pid.h> #include <linux/pid_namespace.h> #include <linux/mutex.h> #include <net/netlink.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" #include "cma_priv.h" #include "restrack.h" #include "uverbs.h" /* * This determines whether a non-privileged user is allowed to specify a * controlled QKEY or not, when true non-privileged user is allowed to specify * a controlled QKEY. */ static bool privileged_qkey; typedef int (*res_fill_func_t)(struct sk_buff*, bool, struct rdma_restrack_entry*, uint32_t); /* * Sort array elements by the netlink attribute name */ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_ATTR_CHARDEV] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_ABI] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_CHARDEV_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_CHARDEV_TYPE] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE }, [RDMA_NLDEV_ATTR_DEV_DIM] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, .len = IB_DEVICE_NAME_MAX }, [RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DEV_PROTOCOL] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_DRIVER_STRING] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_DRIVER_S32] = { .type = NLA_S32 }, [RDMA_NLDEV_ATTR_DRIVER_S64] = { .type = NLA_S64 }, [RDMA_NLDEV_ATTR_DRIVER_U32] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_DRIVER_U64] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_LINK_TYPE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, [RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_NDEV_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_NDEV_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ }, [RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CM_IDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTX] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_CTXN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_CTX_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_DST_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_MRN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED }, 
[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PDN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_RAW] = { .type = NLA_BINARY }, [RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRC_ADDR] = { .len = sizeof(struct __kernel_sockaddr_storage) }, [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR]= { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME]= { .type = NLA_NUL_STRING, .len = RDMA_NLDEV_ATTR_EMPTY_STRING }, [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY]= { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_RES_SRQ] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_RES_SRQN] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_RES_SRQ_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_MIN_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_MAX_RANGE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_MODE] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_RES] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_COUNTER] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_COUNTER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTERS] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY] = { .type = NLA_NESTED }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME] = { .type = NLA_NUL_STRING }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 }, [RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID] = { .type = NLA_U32 }, [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type) { if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) return -EMSGSIZE; if (print_type != RDMA_NLDEV_PRINT_TYPE_UNSPEC && nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u32 value) { if (put_driver_name_print_type(msg, name, print_type)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) return -EMSGSIZE; return 0; } static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, enum rdma_nldev_print_type print_type, u64 value) { if (put_driver_name_print_type(msg, name, 
print_type)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; return 0; } int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, const char *str) { if (put_driver_name_print_type(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) return -EMSGSIZE; return 0; } EXPORT_SYMBOL(rdma_nl_put_driver_string); int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32); int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, u32 value) { return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u32_hex); int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64); int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) { return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, value); } EXPORT_SYMBOL(rdma_nl_put_driver_u64_hex); bool rdma_nl_get_privileged_qkey(void) { return privileged_qkey || capable(CAP_NET_RAW); } EXPORT_SYMBOL(rdma_nl_get_privileged_qkey); static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, dev_name(&device->dev))) return -EMSGSIZE; return 0; } static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) { char fw[IB_FW_VERSION_NAME_MAX]; int ret = 0; u32 port; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) return -EMSGSIZE; BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64)); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, device->attrs.device_cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; ib_get_device_fw_str(device, fw); /* Device without FW has strlen(fw) = 0 */ if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, be64_to_cpu(device->node_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, be64_to_cpu(device->attrs.sys_image_guid), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) return -EMSGSIZE; /* * Link type is determined on first port and mlx4 device * which can potentially have two different link type for the same * IB device is considered as better to be avoided in the future, */ port = rdma_start_port(device); if (rdma_cap_opa_mad(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); else if (rdma_protocol_ib(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); else if (rdma_protocol_iwarp(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); else if (rdma_protocol_roce(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); else if (rdma_protocol_usnic(device, port)) ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "usnic"); return ret; } static int fill_port_info(struct sk_buff 
*msg, struct ib_device *device, u32 port, const struct net *net) { struct net_device *netdev = NULL; struct ib_port_attr attr; int ret; u64 cap_flags = 0; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) return -EMSGSIZE; ret = ib_query_port(device, port, &attr); if (ret) return ret; if (rdma_protocol_ib(device, port)) { BUILD_BUG_ON((sizeof(attr.port_cap_flags) + sizeof(attr.port_cap_flags2)) > sizeof(u64)); cap_flags = attr.port_cap_flags | ((u64)attr.port_cap_flags2 << 32); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) return -EMSGSIZE; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) return -EMSGSIZE; netdev = ib_device_get_netdev(device, port); if (netdev && net_eq(dev_net(netdev), net)) { ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_NDEV_INDEX, netdev->ifindex); if (ret) goto out; ret = nla_put_string(msg, RDMA_NLDEV_ATTR_NDEV_NAME, netdev->name); } out: dev_put(netdev); return ret; } static int fill_res_info_entry(struct sk_buff *msg, const char *name, u64 curr) { struct nlattr *entry_attr; entry_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_info(struct sk_buff *msg, struct ib_device *device) { static const char * const names[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_PD] = "pd", [RDMA_RESTRACK_CQ] = "cq", [RDMA_RESTRACK_QP] = "qp", [RDMA_RESTRACK_CM_ID] = "cm_id", [RDMA_RESTRACK_MR] = "mr", [RDMA_RESTRACK_CTX] = "ctx", [RDMA_RESTRACK_SRQ] = "srq", }; struct nlattr *table_attr; int ret, i, curr; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); if (!table_attr) return -EMSGSIZE; for (i = 0; i < RDMA_RESTRACK_MAX; i++) { if (!names[i]) continue; curr = rdma_restrack_count(device, i); ret = fill_res_info_entry(msg, names[i], curr); if (ret) goto err; } nla_nest_end(msg, table_attr); return 0; err: nla_nest_cancel(msg, table_attr); return ret; } static int fill_res_name_pid(struct sk_buff *msg, struct rdma_restrack_entry *res) { int err = 0; /* * For user resources, user is should read /proc/PID/comm to get the * name of the task file. */ if (rdma_is_kernel_res(res)) { err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name); } else { pid_t pid; pid = task_pid_vnr(res->task); /* * Task is dead and in zombie state. * There is no need to print PID anymore. */ if (pid) /* * This part is racy, task can be killed and PID will * be zero right here but it is ok, next query won't * return PID. We don't promise real-time reflection * of SW objects. */ err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); } return err ? 
-EMSGSIZE : 0; } static int fill_res_qp_entry_query(struct sk_buff *msg, struct rdma_restrack_entry *res, struct ib_device *dev, struct ib_qp *qp) { struct ib_qp_init_attr qp_init_attr; struct ib_qp_attr qp_attr; int ret; ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr); if (ret) return ret; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, qp_attr.dest_qp_num)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, qp_attr.rq_psn)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) goto err; if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC || qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) { if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, qp_attr.path_mig_state)) goto err; } if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) goto err; if (dev->ops.fill_res_qp_entry) return dev->ops.fill_res_qp_entry(msg, qp); return 0; err: return -EMSGSIZE; } static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; int ret; if (port && port != qp->port) return -EAGAIN; /* In create_qp() port is not set yet */ if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) return -EMSGSIZE; ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); if (ret) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) return -EMSGSIZE; ret = fill_res_name_pid(msg, res); if (ret) return -EMSGSIZE; return fill_res_qp_entry_query(msg, res, dev, qp); } static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_qp *qp = container_of(res, struct ib_qp, res); struct ib_device *dev = qp->device; if (port && port != qp->port) return -EAGAIN; if (!dev->ops.fill_res_qp_entry_raw) return -EINVAL; return dev->ops.fill_res_qp_entry_raw(msg, qp); } static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct rdma_id_private *id_priv = container_of(res, struct rdma_id_private, res); struct ib_device *dev = id_priv->id.device; struct rdma_cm_id *cm_id = &id_priv->id; if (port && port != cm_id->port_num) return -EAGAIN; if (cm_id->port_num && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) goto err; if (id_priv->qp_num) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) goto err; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) goto err; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) goto err; if (cm_id->route.addr.src_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, sizeof(cm_id->route.addr.src_addr), &cm_id->route.addr.src_addr)) goto err; if (cm_id->route.addr.dst_addr.ss_family && nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, sizeof(cm_id->route.addr.dst_addr), &cm_id->route.addr.dst_addr)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) goto err; if (fill_res_name_pid(msg, res)) goto err; if (dev->ops.fill_res_cm_id_entry) return dev->ops.fill_res_cm_id_entry(msg, cm_id); return 0; err: return -EMSGSIZE; } static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct 
rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; /* Poll context is only valid for kernel CQs */ if (rdma_is_kernel_res(res) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) return -EMSGSIZE; if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, cq->uobject->uevent.uobject.context->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_cq_entry) ? dev->ops.fill_res_cq_entry(msg, cq) : 0; } static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_cq *cq = container_of(res, struct ib_cq, res); struct ib_device *dev = cq->device; if (!dev->ops.fill_res_cq_entry_raw) return -EINVAL; return dev->ops.fill_res_cq_entry_raw(msg, cq); } static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) return -EMSGSIZE; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) return -EMSGSIZE; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) return -EMSGSIZE; if (fill_res_name_pid(msg, res)) return -EMSGSIZE; return (dev->ops.fill_res_mr_entry) ? 
dev->ops.fill_res_mr_entry(msg, mr) : 0; } static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (!dev->ops.fill_res_mr_entry_raw) return -EINVAL; return dev->ops.fill_res_mr_entry_raw(msg, mr); } static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_pd *pd = container_of(res, struct ib_pd, res); if (has_cap_net_admin) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, pd->local_dma_lkey)) goto err; if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, pd->unsafe_global_rkey)) goto err; } if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) goto err; if (!rdma_is_kernel_res(res) && nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, pd->uobject->context->res.id)) goto err; return fill_res_name_pid(msg, res); err: return -EMSGSIZE; } static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_ucontext *ctx = container_of(res, struct ib_ucontext, res); if (rdma_is_kernel_res(res)) return 0; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) return -EMSGSIZE; return fill_res_name_pid(msg, res); } static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, uint32_t max_range) { struct nlattr *entry_attr; if (!min_range) return 0; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (min_range == max_range) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) goto err; } else { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) goto err; } nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) { uint32_t min_range = 0, prev = 0; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &srq->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { if (!rdma_restrack_get(res)) continue; qp = container_of(res, struct ib_qp, res); if (!qp->srq || (qp->srq->res.id != srq->res.id)) { rdma_restrack_put(res); continue; } if (qp->qp_num < prev) /* qp_num should be ascending */ goto err_loop; if (min_range == 0) { min_range = qp->qp_num; } else if (qp->qp_num > (prev + 1)) { if (fill_res_range_qp_entry(msg, min_range, prev)) goto err_loop; min_range = qp->qp_num; } prev = qp->qp_num; rdma_restrack_put(res); } xa_unlock(&rt->xa); if (fill_res_range_qp_entry(msg, min_range, prev)) goto err; nla_nest_end(msg, table_attr); return 0; err_loop: rdma_restrack_put(res); xa_unlock(&rt->xa); err: nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_srq *srq = container_of(res, struct ib_srq, res); struct ib_device *dev = srq->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) goto err; if (nla_put_u8(msg, 
RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) goto err; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) goto err; if (ib_srq_has_cq(srq->srq_type)) { if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, srq->ext.cq->res.id)) goto err; } if (fill_res_srq_qps(msg, srq)) goto err; if (fill_res_name_pid(msg, res)) goto err; if (dev->ops.fill_res_srq_entry) return dev->ops.fill_res_srq_entry(msg, srq); return 0; err: return -EMSGSIZE; } static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_srq *srq = container_of(res, struct ib_srq, res); struct ib_device *dev = srq->device; if (!dev->ops.fill_res_srq_entry_raw) return -EINVAL; return dev->ops.fill_res_srq_entry_raw(msg, srq); } static int fill_stat_counter_mode(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_counter_mode *m = &counter->mode; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) return -EMSGSIZE; if (m->mode == RDMA_COUNTER_MODE_AUTO) { if ((m->mask & RDMA_COUNTER_MASK_QP_TYPE) && nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) return -EMSGSIZE; if ((m->mask & RDMA_COUNTER_MASK_PID) && fill_res_name_pid(msg, &counter->res)) return -EMSGSIZE; } return 0; } static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } static int fill_stat_counter_qps(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; struct nlattr *table_attr; struct ib_qp *qp = NULL; unsigned long id = 0; int ret = 0; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); if (!table_attr) return -EMSGSIZE; rt = &counter->device->res[RDMA_RESTRACK_QP]; xa_lock(&rt->xa); xa_for_each(&rt->xa, id, res) { qp = container_of(res, struct ib_qp, res); if (!qp->counter || (qp->counter->id != counter->id)) continue; ret = fill_stat_counter_qp_entry(msg, qp->qp_num); if (ret) goto err; } xa_unlock(&rt->xa); nla_nest_end(msg, table_attr); return 0; err: xa_unlock(&rt->xa); nla_nest_cancel(msg, table_attr); return ret; } int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, u64 value) { struct nlattr *entry_attr; entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry_attr) return -EMSGSIZE; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, name)) goto err; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, value, RDMA_NLDEV_ATTR_PAD)) goto err; nla_nest_end(msg, entry_attr); return 0; err: nla_nest_cancel(msg, entry_attr); return -EMSGSIZE; } EXPORT_SYMBOL(rdma_nl_stat_hwcounter_entry); static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct ib_mr *mr = container_of(res, struct ib_mr, res); struct ib_device *dev = mr->pd->device; if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) goto err; if (dev->ops.fill_stat_mr_entry) return dev->ops.fill_stat_mr_entry(msg, mr); return 0; err: return -EMSGSIZE; } static int fill_stat_counter_hwcounters(struct sk_buff *msg, struct rdma_counter *counter) { struct rdma_hw_stats *st = counter->stats; struct nlattr *table_attr; int i; table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); 
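/*
 * Rough sketch of the nested attribute layout that the loop below emits,
 * added for readability only (the counter name shown is illustrative, not
 * fixed by this file):
 *
 *   RDMA_NLDEV_ATTR_STAT_HWCOUNTERS
 *     RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY
 *       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME  = "rx_write_requests"
 *       RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE = <u64>
 *     RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY
 *       ...
 *
 * Each entry is produced by rdma_nl_stat_hwcounter_entry(); counters whose
 * is_disabled bit is set are skipped.
 */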
if (!table_attr) return -EMSGSIZE; mutex_lock(&st->lock); for (i = 0; i < st->num_counters; i++) { if (test_bit(i, st->is_disabled)) continue; if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, st->value[i])) goto err; } mutex_unlock(&st->lock); nla_nest_end(msg, table_attr); return 0; err: mutex_unlock(&st->lock); nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, struct rdma_restrack_entry *res, uint32_t port) { struct rdma_counter *counter = container_of(res, struct rdma_counter, res); if (port && port != counter->port) return -EAGAIN; /* Dump it even query failed */ rdma_counter_query_stats(counter); if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || fill_stat_counter_mode(msg, counter) || fill_stat_counter_qps(msg, counter) || fill_stat_counter_hwcounters(msg, counter)) return -EMSGSIZE; return 0; } static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_dev_info(msg, device); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) { char name[IB_DEVICE_NAME_MAX] = {}; nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME], IB_DEVICE_NAME_MAX); if (strlen(name) == 0) { err = -EINVAL; goto done; } err = ib_device_rename(device, name); goto done; } if (tb[RDMA_NLDEV_NET_NS_FD]) { u32 ns_fd; ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]); err = ib_device_set_netns_put(skb, device, ns_fd); goto put_done; } if (tb[RDMA_NLDEV_ATTR_DEV_DIM]) { u8 use_dim; use_dim = nla_get_u8(tb[RDMA_NLDEV_ATTR_DEV_DIM]); err = ib_device_set_dim(device, use_dim); goto done; } done: ib_device_put(device); put_done: return err; } static int _nldev_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, NLM_F_MULTI); if (!nlh || fill_dev_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; } 
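/*
 * Hedged userspace sketch (illustrative, not taken from this file): a device
 * dump is requested over a NETLINK_RDMA socket with NLM_F_DUMP set, e.g.
 *
 *	struct nlmsghdr req = {
 *		.nlmsg_len   = NLMSG_HDRLEN,
 *		.nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
 *						RDMA_NLDEV_CMD_GET),
 *		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *	};
 *
 * The kernel then answers with one NLM_F_MULTI message per device, built by
 * _nldev_get_dumpit() above; cb->args[0] carries the index of the next device
 * to emit, so a dump that spans several skbs resumes where the previous pass
 * stopped.
 */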
static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { /* * There is no need to take lock, because * we are relying on ib_core's locking. */ return ib_enum_all_devs(_nldev_get_dumpit, skb, cb); } static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; u32 port; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { err = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); if (!nlh) { err = -EMSGSIZE; goto err_free; } err = fill_port_info(msg, device, port, sock_net(skb->sk)); if (err) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; int start = cb->args[0]; struct nlmsghdr *nlh; u32 idx = 0; u32 ifindex; int err; unsigned int p; err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), ifindex); if (!device) return -EINVAL; rdma_for_each_port (device, p) { /* * The dumpit function returns all information from specific * index. This specific index is taken from the netlink * messages request sent by user and it is available * in cb->args[0]. * * Usually, the user doesn't fill this field and it causes * to return everything. 
* */ if (idx < start) { idx++; continue; } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET), 0, NLM_F_MULTI); if (!nlh || fill_port_info(skb, device, p, sock_net(skb->sk))) { nlmsg_cancel(skb, nlh); goto out; } idx++; nlmsg_end(skb, nlh); } out: ib_device_put(device); cb->args[0] = idx; return skb->len; } static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index; int ret; ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_free; } ret = fill_res_info(msg, device); if (ret) goto err_free; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int _nldev_res_get_dumpit(struct ib_device *device, struct sk_buff *skb, struct netlink_callback *cb, unsigned int idx) { int start = cb->args[0]; struct nlmsghdr *nlh; if (idx < start) return 0; nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET), 0, NLM_F_MULTI); if (!nlh || fill_res_info(skb, device)) { nlmsg_cancel(skb, nlh); goto out; } nlmsg_end(skb, nlh); idx++; out: cb->args[0] = idx; return skb->len; } static int nldev_res_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb); } struct nldev_fill_res_entry { enum rdma_nldev_attr nldev_attr; u8 flags; u32 entry; u32 id; }; enum nldev_res_flags { NLDEV_PER_DEV = 1 << 0, }; static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = { [RDMA_RESTRACK_QP] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_QP, .entry = RDMA_NLDEV_ATTR_RES_QP_ENTRY, .id = RDMA_NLDEV_ATTR_RES_LQPN, }, [RDMA_RESTRACK_CM_ID] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID, .entry = RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CM_IDN, }, [RDMA_RESTRACK_CQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CQN, }, [RDMA_RESTRACK_MR] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_MR, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_MR_ENTRY, .id = RDMA_NLDEV_ATTR_RES_MRN, }, [RDMA_RESTRACK_PD] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_PD, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_PD_ENTRY, .id = RDMA_NLDEV_ATTR_RES_PDN, }, [RDMA_RESTRACK_COUNTER] = { .nldev_attr = RDMA_NLDEV_ATTR_STAT_COUNTER, .entry = RDMA_NLDEV_ATTR_STAT_COUNTER_ENTRY, .id = RDMA_NLDEV_ATTR_STAT_COUNTER_ID, }, [RDMA_RESTRACK_CTX] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_CTX, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_CTX_ENTRY, .id = RDMA_NLDEV_ATTR_RES_CTXN, }, [RDMA_RESTRACK_SRQ] = { .nldev_attr = RDMA_NLDEV_ATTR_RES_SRQ, .flags = NLDEV_PER_DEV, .entry = RDMA_NLDEV_ATTR_RES_SRQ_ENTRY, .id = RDMA_NLDEV_ATTR_RES_SRQN, }, }; static int res_get_common_doit(struct sk_buff *skb, struct 
nlmsghdr *nlh, struct netlink_ext_ack *extack, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct ib_device *device; u32 index, id, port = 0; bool has_cap_net_admin; struct sk_buff *msg; int ret; ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } } if ((port && fe->flags & NLDEV_PER_DEV) || (!port && ~fe->flags & NLDEV_PER_DEV)) { ret = -EINVAL; goto err; } id = nla_get_u32(tb[fe->id]); res = rdma_restrack_get_byid(device, res_type, id); if (IS_ERR(res)) { ret = PTR_ERR(res); goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_get; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(nlh->nlmsg_type)), 0, 0); if (!nlh || fill_nldev_handle(msg, device)) { ret = -EMSGSIZE; goto err_free; } has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN); ret = fill_func(msg, has_cap_net_admin, res, port); if (ret) goto err_free; rdma_restrack_put(res); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err_get: rdma_restrack_put(res); err: ib_device_put(device); return ret; } static int res_get_common_dumpit(struct sk_buff *skb, struct netlink_callback *cb, enum rdma_restrack_type res_type, res_fill_func_t fill_func) { const struct nldev_fill_res_entry *fe = &fill_entries[res_type]; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct rdma_restrack_entry *res; struct rdma_restrack_root *rt; int err, ret = 0, idx = 0; struct nlattr *table_attr; struct nlattr *entry_attr; struct ib_device *device; int start = cb->args[0]; bool has_cap_net_admin; struct nlmsghdr *nlh; unsigned long id; u32 index, port = 0; bool filled = false; err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); /* * Right now, we are expecting the device index to get res information, * but it is possible to extend this code to return all devices in * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX. * if it doesn't exist, we will iterate over all devices. * * But it is not needed for now. 
*/ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; /* * If no PORT_INDEX is supplied, we will return all QPs from that device */ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_index; } } nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NL_GET_OP(cb->nlh->nlmsg_type)), 0, NLM_F_MULTI); if (!nlh || fill_nldev_handle(skb, device)) { ret = -EMSGSIZE; goto err; } table_attr = nla_nest_start_noflag(skb, fe->nldev_attr); if (!table_attr) { ret = -EMSGSIZE; goto err; } has_cap_net_admin = netlink_capable(cb->skb, CAP_NET_ADMIN); rt = &device->res[res_type]; xa_lock(&rt->xa); /* * FIXME: if the skip ahead is something common this loop should * use xas_for_each & xas_pause to optimize, we can have a lot of * objects. */ xa_for_each(&rt->xa, id, res) { if (idx < start || !rdma_restrack_get(res)) goto next; xa_unlock(&rt->xa); filled = true; entry_attr = nla_nest_start_noflag(skb, fe->entry); if (!entry_attr) { ret = -EMSGSIZE; rdma_restrack_put(res); goto msg_full; } ret = fill_func(skb, has_cap_net_admin, res, port); rdma_restrack_put(res); if (ret) { nla_nest_cancel(skb, entry_attr); if (ret == -EMSGSIZE) goto msg_full; if (ret == -EAGAIN) goto again; goto res_err; } nla_nest_end(skb, entry_attr); again: xa_lock(&rt->xa); next: idx++; } xa_unlock(&rt->xa); msg_full: nla_nest_end(skb, table_attr); nlmsg_end(skb, nlh); cb->args[0] = idx; /* * No more entries to fill, cancel the message and * return 0 to mark end of dumpit. */ if (!filled) goto err; ib_device_put(device); return skb->len; res_err: nla_nest_cancel(skb, table_attr); err: nlmsg_cancel(skb, nlh); err_index: ib_device_put(device); return ret; } #define RES_GET_FUNCS(name, type) \ static int nldev_res_get_##name##_dumpit(struct sk_buff *skb, \ struct netlink_callback *cb) \ { \ return res_get_common_dumpit(skb, cb, type, \ fill_res_##name##_entry); \ } \ static int nldev_res_get_##name##_doit(struct sk_buff *skb, \ struct nlmsghdr *nlh, \ struct netlink_ext_ack *extack) \ { \ return res_get_common_doit(skb, nlh, extack, type, \ fill_res_##name##_entry); \ } RES_GET_FUNCS(qp, RDMA_RESTRACK_QP); RES_GET_FUNCS(qp_raw, RDMA_RESTRACK_QP); RES_GET_FUNCS(cm_id, RDMA_RESTRACK_CM_ID); RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ); RES_GET_FUNCS(cq_raw, RDMA_RESTRACK_CQ); RES_GET_FUNCS(pd, RDMA_RESTRACK_PD); RES_GET_FUNCS(mr, RDMA_RESTRACK_MR); RES_GET_FUNCS(mr_raw, RDMA_RESTRACK_MR); RES_GET_FUNCS(counter, RDMA_RESTRACK_COUNTER); RES_GET_FUNCS(ctx, RDMA_RESTRACK_CTX); RES_GET_FUNCS(srq, RDMA_RESTRACK_SRQ); RES_GET_FUNCS(srq_raw, RDMA_RESTRACK_SRQ); static LIST_HEAD(link_ops); static DECLARE_RWSEM(link_ops_rwsem); static const struct rdma_link_ops *link_ops_get(const char *type) { const struct rdma_link_ops *ops; list_for_each_entry(ops, &link_ops, list) { if (!strcmp(ops->type, type)) goto out; } ops = NULL; out: return ops; } void rdma_link_register(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); if (WARN_ON_ONCE(link_ops_get(ops->type))) goto out; list_add(&ops->list, &link_ops); out: up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_register); void rdma_link_unregister(struct rdma_link_ops *ops) { down_write(&link_ops_rwsem); list_del(&ops->list); up_write(&link_ops_rwsem); } EXPORT_SYMBOL(rdma_link_unregister); static int 
nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char ibdev_name[IB_DEVICE_NAME_MAX]; const struct rdma_link_ops *ops; char ndev_name[IFNAMSIZ]; struct net_device *ndev; char type[IFNAMSIZ]; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] || !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME]) return -EINVAL; nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME], sizeof(ibdev_name)); if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0) return -EINVAL; nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type)); nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME], sizeof(ndev_name)); ndev = dev_get_by_name(sock_net(skb->sk), ndev_name); if (!ndev) return -ENODEV; down_read(&link_ops_rwsem); ops = link_ops_get(type); #ifdef CONFIG_MODULES if (!ops) { up_read(&link_ops_rwsem); request_module("rdma-link-%s", type); down_read(&link_ops_rwsem); ops = link_ops_get(type); } #endif err = ops ? ops->newlink(ibdev_name, ndev) : -EINVAL; up_read(&link_ops_rwsem); dev_put(ndev); return err; } static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; u32 index; int err; err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { ib_device_put(device); return -EINVAL; } ib_unregister_device_and_put(device); return 0; } static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; char client_name[RDMA_NLDEV_ATTR_CHARDEV_TYPE_SIZE]; struct ib_client_nl_info data = {}; struct ib_device *ibdev = NULL; struct sk_buff *msg; u32 index; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE]) return -EINVAL; nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE], sizeof(client_name)); if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) { index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); ibdev = ib_device_get_by_index(sock_net(skb->sk), index); if (!ibdev) return -EINVAL; if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { data.port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(ibdev, data.port)) { err = -EINVAL; goto out_put; } } else { data.port = -1; } } else if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) { return -EINVAL; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out_put; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET_CHARDEV), 0, 0); if (!nlh) { err = -EMSGSIZE; goto out_nlmsg; } data.nl_msg = msg; err = ib_get_client_nl_info(ibdev, client_name, &data); if (err) goto out_nlmsg; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, huge_encode_dev(data.cdev->devt), RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, RDMA_NLDEV_ATTR_PAD); if (err) goto out_data; if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, dev_name(data.cdev))) { err = -EMSGSIZE; goto out_data; } nlmsg_end(msg, nlh); put_device(data.cdev); 
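/*
 * Sketch of the reply assembled above (attribute meanings as used in this
 * function; the example device name is illustrative):
 *
 *   RDMA_NLDEV_ATTR_CHARDEV      - data.cdev->devt packed into a u64 by
 *                                  huge_encode_dev()
 *   RDMA_NLDEV_ATTR_CHARDEV_ABI  - ABI version reported by the requested
 *                                  client
 *   RDMA_NLDEV_ATTR_CHARDEV_NAME - dev_name(data.cdev), e.g. "uverbs0"
 */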
if (ibdev) ib_device_put(ibdev); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); out_data: put_device(data.cdev); out_nlmsg: nlmsg_free(msg); out_put: if (ibdev) ib_device_put(ibdev); return err; } static int nldev_sys_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct sk_buff *msg; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err) return err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET), 0, 0); if (!nlh) { nlmsg_free(msg); return -EMSGSIZE; } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, (u8)ib_devices_shared_netns); if (err) { nlmsg_free(msg); return err; } err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, (u8)privileged_qkey); if (err) { nlmsg_free(msg); return err; } /* * Copy-on-fork is supported. * See commits: * 70e806e4e645 ("mm: Do early cow for pinned pages during fork() for ptes") * 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") * for more details. Don't backport this without them. * * Return value ignored on purpose, assume copy-on-fork is not * supported in case of failure. */ nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); nlmsg_end(msg, nlh); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); } static int nldev_set_sys_set_netns_doit(struct nlattr *tb[]) { u8 enable; int err; enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]); /* Only 0 and 1 are supported */ if (enable > 1) return -EINVAL; err = rdma_compatdev_set(enable); return err; } static int nldev_set_sys_set_pqkey_doit(struct nlattr *tb[]) { u8 enable; enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]); /* Only 0 and 1 are supported */ if (enable > 1) return -EINVAL; privileged_qkey = enable; return 0; } static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int err; err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (err) return -EINVAL; if (tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]) return nldev_set_sys_set_netns_doit(tb); if (tb[RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE]) return nldev_set_sys_set_pqkey_doit(tb); return -EINVAL; } static int nldev_stat_set_mode_doit(struct sk_buff *msg, struct netlink_ext_ack *extack, struct nlattr *tb[], struct ib_device *device, u32 port) { u32 mode, mask = 0, qpn, cntn = 0; int ret; /* Currently only counter for QP is supported */ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); if (mode == RDMA_COUNTER_MODE_AUTO) { if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) mask = nla_get_u32( tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); return rdma_counter_set_auto_mode(device, port, mask, extack); } if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); ret = rdma_counter_bind_qpn(device, port, qpn, cntn); if (ret) return ret; } else { ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn); if (ret) return ret; } if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret 
= -EMSGSIZE; goto err_fill; } return 0; err_fill: rdma_counter_unbind_qpn(device, port, qpn, cntn); return ret; } static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], struct ib_device *device, u32 port) { struct rdma_hw_stats *stats; struct nlattr *entry_attr; unsigned long *target; int rem, i, ret = 0; u32 index; stats = ib_get_hw_stats_port(device, port); if (!stats) return -EINVAL; target = kcalloc(BITS_TO_LONGS(stats->num_counters), sizeof(*stats->is_disabled), GFP_KERNEL); if (!target) return -ENOMEM; nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], rem) { index = nla_get_u32(entry_attr); if ((index >= stats->num_counters) || !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { ret = -EINVAL; goto out; } set_bit(index, target); } for (i = 0; i < stats->num_counters; i++) { if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) continue; ret = rdma_counter_modify(device, port, i, test_bit(i, target)); if (ret) goto out; } out: kfree(target); return ret; } static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err_put_device; } if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = -EINVAL; goto err_put_device; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err_put_device; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) { ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); if (ret) goto err_free_msg; } if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { ret = nldev_stat_set_counter_dynamic_doit(tb, device, port); if (ret) goto err_free_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_free_msg: nlmsg_free(msg); err_put_device: ib_device_put(device); return ret; } static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; u32 index, port, qpn, cntn; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID] || !tb[RDMA_NLDEV_ATTR_RES_LQPN]) return -EINVAL; if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; 
goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_fill; } cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { ret = -EMSGSIZE; goto err_fill; } ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); if (ret) goto err_fill; nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_fill: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_default_counter(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct nlattr *tb[]) { struct rdma_hw_stats *stats; struct nlattr *table_attr; struct ib_device *device; int ret, num_cnts, i; struct sk_buff *msg; u32 index, port; u64 v; if (!tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; if (!device->ops.alloc_hw_port_stats || !device->ops.get_hw_stats) { ret = -EINVAL; goto err; } port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { ret = -EMSGSIZE; goto err_msg; } mutex_lock(&stats->lock); num_cnts = device->ops.get_hw_stats(device, stats, port, 0); if (num_cnts < 0) { ret = -EINVAL; goto err_stats; } table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table_attr) { ret = -EMSGSIZE; goto err_stats; } for (i = 0; i < num_cnts; i++) { if (test_bit(i, stats->is_disabled)) continue; v = stats->value[i] + rdma_counter_get_hwstat_value(device, port, i); if (rdma_nl_stat_hwcounter_entry(msg, stats->descs[i].name, v)) { ret = -EMSGSIZE; goto err_table; } } nla_nest_end(msg, table_attr); mutex_unlock(&stats->lock); nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_table: nla_nest_cancel(msg, table_attr); err_stats: mutex_unlock(&stats->lock); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct nlattr *tb[]) { static enum rdma_nl_counter_mode mode; static enum rdma_nl_counter_mask mask; struct ib_device *device; struct sk_buff *msg; u32 index, port; int ret; if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) return nldev_res_get_counter_doit(skb, nlh, extack); if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; 
goto err; } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET), 0, 0); if (!nlh) { ret = -EMSGSIZE; goto err_msg; } ret = rdma_counter_get_mode(device, port, &mode, &mask); if (ret) goto err_msg; if (fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { ret = -EMSGSIZE; goto err_msg; } if ((mode == RDMA_COUNTER_MODE_AUTO) && nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { ret = -EMSGSIZE; goto err_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); err_msg: nlmsg_free(msg); err: ib_device_put(device); return ret; } static int nldev_stat_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret) return -EINVAL; if (!tb[RDMA_NLDEV_ATTR_STAT_RES]) return stat_get_doit_default_counter(skb, nlh, extack, tb); switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = stat_get_doit_qp(skb, nlh, extack, tb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_doit(skb, nlh, extack, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; int ret; ret = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, NULL); if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES]) return -EINVAL; switch (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES])) { case RDMA_NLDEV_ATTR_RES_QP: ret = nldev_res_get_counter_dumpit(skb, cb); break; case RDMA_NLDEV_ATTR_RES_MR: ret = res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR, fill_stat_mr_entry); break; default: ret = -EINVAL; break; } return ret; } static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry; struct rdma_hw_stats *stats; struct ib_device *device; struct sk_buff *msg; u32 devid, port; int ret, i; ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, extack); if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); device = ib_device_get_by_index(sock_net(skb->sk), devid); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; goto err; } stats = ib_get_hw_stats_port(device, port); if (!stats) { ret = -EINVAL; goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; goto err; } nlh = nlmsg_put( msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS), 0, 0); ret = -EMSGSIZE; if (!nlh || fill_nldev_handle(msg, device) || nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) goto err_msg; table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); if (!table) goto err_msg; mutex_lock(&stats->lock); for (i = 0; i < stats->num_counters; i++) { entry = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); if (!entry) goto err_msg_table; if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, stats->descs[i].name) || nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) goto 
			err_msg_entry;

		if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
		    (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
				!test_bit(i, stats->is_disabled))))
			goto err_msg_entry;

		nla_nest_end(msg, entry);
	}
	mutex_unlock(&stats->lock);

	nla_nest_end(msg, table);
	nlmsg_end(msg, nlh);
	ib_device_put(device);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

err_msg_entry:
	nla_nest_cancel(msg, entry);
err_msg_table:
	mutex_unlock(&stats->lock);
	nla_nest_cancel(msg, table);
err_msg:
	nlmsg_free(msg);
err:
	ib_device_put(device);
	return ret;
}

static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_GET_CHARDEV] = {
		.doit = nldev_get_chardev,
	},
	[RDMA_NLDEV_CMD_SET] = {
		.doit = nldev_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_NEWLINK] = {
		.doit = nldev_newlink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_DELLINK] = {
		.doit = nldev_dellink,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.doit = nldev_res_get_qp_doit,
		.dump = nldev_res_get_qp_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.doit = nldev_res_get_cm_id_doit,
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.doit = nldev_res_get_cq_doit,
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.doit = nldev_res_get_mr_doit,
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.doit = nldev_res_get_pd_doit,
		.dump = nldev_res_get_pd_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CTX_GET] = {
		.doit = nldev_res_get_ctx_doit,
		.dump = nldev_res_get_ctx_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET] = {
		.doit = nldev_res_get_srq_doit,
		.dump = nldev_res_get_srq_dumpit,
	},
	[RDMA_NLDEV_CMD_SYS_GET] = {
		.doit = nldev_sys_get_doit,
	},
	[RDMA_NLDEV_CMD_SYS_SET] = {
		.doit = nldev_set_sys_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_SET] = {
		.doit = nldev_stat_set_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET] = {
		.doit = nldev_stat_get_doit,
		.dump = nldev_stat_get_dumpit,
	},
	[RDMA_NLDEV_CMD_STAT_DEL] = {
		.doit = nldev_stat_del_doit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET_RAW] = {
		.doit = nldev_res_get_qp_raw_doit,
		.dump = nldev_res_get_qp_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET_RAW] = {
		.doit = nldev_res_get_cq_raw_doit,
		.dump = nldev_res_get_cq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET_RAW] = {
		.doit = nldev_res_get_mr_raw_doit,
		.dump = nldev_res_get_mr_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_RES_SRQ_GET_RAW] = {
		.doit = nldev_res_get_srq_raw_doit,
		.dump = nldev_res_get_srq_raw_dumpit,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
		.doit = nldev_stat_get_counter_status_doit,
	},
};

void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);
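/*
 * Editorial sketch, not part of nldev.c: the reply pattern the doit handlers
 * above share -- allocate a reply skb, write the nldev header, add attributes,
 * finalize, unicast back to the requester -- shown in isolation. The helper
 * name nldev_example_reply() and the single u8 payload are illustrative
 * assumptions; every call it makes appears in the handlers above.
 */
static int nldev_example_reply(struct sk_buff *skb, struct nlmsghdr *nlh,
			       u8 example_val)
{
	struct sk_buff *msg;

	/* reply skb, sized like the handlers above */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	/* netlink header carrying the nldev command type */
	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_SYS_GET),
			0, 0);
	if (!nlh || nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
			       example_val)) {
		nlmsg_free(msg);
		return -EMSGSIZE;
	}

	nlmsg_end(msg, nlh);
	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
}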
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA timer back-end using hrtimer
 * Copyright (C) 2008 Takashi Iwai
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hrtimer.h>
#include <sound/core.h>
#include <sound/timer.h>

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA hrtimer backend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER));

#define NANO_SEC	1000000000UL	/* 10^9 in sec */
static unsigned int resolution;

struct snd_hrtimer {
	struct snd_timer *timer;
	struct hrtimer hrt;
	bool in_callback;
};

static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
	struct snd_timer *t = stime->timer;
	ktime_t delta;
	unsigned long ticks;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	spin_lock(&t->lock);
	if (!t->running)
		goto out; /* fast path */
	stime->in_callback = true;
	ticks = t->sticks;
	spin_unlock(&t->lock);

	/* calculate the drift */
	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
	if (delta > 0)
		ticks += ktime_divns(delta, ticks * resolution);

	snd_timer_interrupt(stime->timer, ticks);

	spin_lock(&t->lock);
	if (t->running) {
		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
		ret = HRTIMER_RESTART;
	}

	stime->in_callback = false;
 out:
	spin_unlock(&t->lock);
	return ret;
}

static int snd_hrtimer_open(struct snd_timer *t)
{
	struct snd_hrtimer *stime;

	stime = kzalloc(sizeof(*stime), GFP_KERNEL);
	if (!stime)
		return -ENOMEM;
	hrtimer_init(&stime->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	stime->timer = t;
	stime->hrt.function = snd_hrtimer_callback;
	t->private_data = stime;
	return 0;
}

static int snd_hrtimer_close(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime) {
		spin_lock_irq(&t->lock);
		t->running = 0; /* just to be sure */
		stime->in_callback = 1; /* skip start/stop */
		spin_unlock_irq(&t->lock);

		hrtimer_cancel(&stime->hrt);
		kfree(stime);
		t->private_data = NULL;
	}
	return 0;
}

static int snd_hrtimer_start(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
		      HRTIMER_MODE_REL);
	return 0;
}

static int snd_hrtimer_stop(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_try_to_cancel(&stime->hrt);
	return 0;
}

static const struct snd_timer_hardware hrtimer_hw __initconst = {
	.flags =	SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_WORK,
	.open =		snd_hrtimer_open,
	.close =	snd_hrtimer_close,
	.start =	snd_hrtimer_start,
	.stop =		snd_hrtimer_stop,
};

/*
 * entry functions
 */

static struct snd_timer *mytimer;

static int __init snd_hrtimer_init(void)
{
	struct snd_timer *timer;
	int err;

	resolution = hrtimer_resolution;

	/* Create a new timer and set up the fields */
	err = snd_timer_global_new("hrtimer",
				   SNDRV_TIMER_GLOBAL_HRTIMER, &timer);
	if (err < 0)
		return err;

	timer->module = THIS_MODULE;
	strcpy(timer->name, "HR timer");
	timer->hw = hrtimer_hw;
	timer->hw.resolution = resolution;
	timer->hw.ticks = NANO_SEC / resolution;
	timer->max_instances = 100; /* lower the limit */

	err = snd_timer_global_register(timer);
	if (err < 0) {
		snd_timer_global_free(timer);
		return err;
	}
	mytimer = timer; /* remember this */
	return 0;
}

static void __exit snd_hrtimer_exit(void)
{
	if (mytimer) {
		snd_timer_global_free(mytimer);
		mytimer = NULL;
	}
}

module_init(snd_hrtimer_init);
module_exit(snd_hrtimer_exit);
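/*
 * Editorial sketch, not part of the ALSA back-end above: the bare periodic
 * hrtimer pattern it is built on -- relative start, re-arm from the callback,
 * cancel on teardown. The example_* names and the 1 ms period are invented
 * for illustration; the ALSA code re-arms with hrtimer_add_expires_ns() and
 * its own drift handling rather than hrtimer_forward_now().
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_hrt;
static u64 example_period_ns = 1000000;	/* 1 ms, arbitrary */

static enum hrtimer_restart example_callback(struct hrtimer *hrt)
{
	/* push the expiry one period into the future and keep running */
	hrtimer_forward_now(hrt, ns_to_ktime(example_period_ns));
	return HRTIMER_RESTART;
}

static void example_timer_start(void)
{
	hrtimer_init(&example_hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_hrt.function = example_callback;
	hrtimer_start(&example_hrt, ns_to_ktime(example_period_ns),
		      HRTIMER_MODE_REL);
}

static void example_timer_stop(void)
{
	/* waits for a running callback to finish, like snd_hrtimer_close() */
	hrtimer_cancel(&example_hrt);
}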
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
* * Original design by Rik van Riel <riel@conectiva.com.br> 2001 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004 * Contributions by Hugh Dickins 2003, 2004 */ /* * Lock ordering in mm: * * inode->i_rwsem (while writing or truncating, not reading or faulting) * mm->mmap_lock * mapping->invalidate_lock (in filemap_fault) * page->flags PG_locked (lock_page) * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below) * vma_start_write * mapping->i_mmap_rwsem * anon_vma->rwsem * mm->page_table_lock or pte_lock * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) * mapping->private_lock (in block_dirty_folio) * folio_lock_memcg move_lock (in block_dirty_folio) * i_pages lock (widely used) * lruvec->lru_lock (in folio_lruvec_lock_irq) * inode->i_lock (in set_page_dirty's __mark_inode_dirty) * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) * i_pages lock (widely used, in set_page_dirty, * in arch-dependent flush_dcache_mmap_lock, * within bdi.wb->list_lock in __sync_single_inode) * * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon) * ->tasklist_lock * pte map lock * * hugetlbfs PageHuge() take locks in this order: * hugetlb_fault_mutex (hugetlbfs specific page fault mutex) * vma_lock (hugetlb specific lock for pmd_sharing) * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing) * page->flags PG_locked (lock_page) */ #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ksm.h> #include <linux/rmap.h> #include <linux/rcupdate.h> #include <linux/export.h> #include <linux/memcontrol.h> #include <linux/mmu_notifier.h> #include <linux/migrate.h> #include <linux/hugetlb.h> #include <linux/huge_mm.h> #include <linux/backing-dev.h> #include <linux/page_idle.h> #include <linux/memremap.h> #include <linux/userfaultfd_k.h> #include <linux/mm_inline.h> #include <asm/tlbflush.h> #define CREATE_TRACE_POINTS #include <trace/events/tlb.h> #include <trace/events/migrate.h> #include "internal.h" static struct kmem_cache *anon_vma_cachep; static struct kmem_cache *anon_vma_chain_cachep; static inline struct anon_vma *anon_vma_alloc(void) { struct anon_vma *anon_vma; anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); anon_vma->num_children = 0; anon_vma->num_active_vmas = 0; anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parents anon_vma. */ anon_vma->root = anon_vma; } return anon_vma; } static inline void anon_vma_free(struct anon_vma *anon_vma) { VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* * Synchronize against folio_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. * * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by * down_read_trylock() from folio_lock_anon_vma_read(). This orders: * * folio_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() * * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. 
*/ might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); } kmem_cache_free(anon_vma_cachep, anon_vma); } static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) { return kmem_cache_alloc(anon_vma_chain_cachep, gfp); } static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) { kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); } static void anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) { avc->vma = vma; avc->anon_vma = anon_vma; list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); } /** * __anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * * The common case will be that we already have one, which * is handled inline by anon_vma_prepare(). But if * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have * optimistically looked up an anon_vma in folio_lock_anon_vma_read() * and that may actually touch the rwsem even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * * As a result, we need to do proper anon_vma locking even * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. * * This must be called with the mmap_lock held for reading. */ int __anon_vma_prepare(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; struct anon_vma *anon_vma, *allocated; struct anon_vma_chain *avc; might_sleep(); avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_enomem; anon_vma = find_mergeable_anon_vma(vma); allocated = NULL; if (!anon_vma) { anon_vma = anon_vma_alloc(); if (unlikely(!anon_vma)) goto out_enomem_free_avc; anon_vma->num_children++; /* self-parent link for new root */ allocated = anon_vma; } anon_vma_lock_write(anon_vma); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); anon_vma->num_active_vmas++; allocated = NULL; avc = NULL; } spin_unlock(&mm->page_table_lock); anon_vma_unlock_write(anon_vma); if (unlikely(allocated)) put_anon_vma(allocated); if (unlikely(avc)) anon_vma_chain_free(avc); return 0; out_enomem_free_avc: anon_vma_chain_free(avc); out_enomem: return -ENOMEM; } /* * This is a useful helper function for locking the anon_vma root as * we traverse the vma->anon_vma_chain, looping over anon_vma's that * have the same vma. * * Such anon_vma's should have the same root, so you'd expect to see * just a single mutex_lock for the whole traversal. */ static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) { struct anon_vma *new_root = anon_vma->root; if (new_root != root) { if (WARN_ON_ONCE(root)) up_write(&root->rwsem); root = new_root; down_write(&root->rwsem); } return root; } static inline void unlock_anon_vma_root(struct anon_vma *root) { if (root) up_write(&root->rwsem); } /* * Attach the anon_vmas from src to dst. 
* Returns 0 on success, -ENOMEM on failure. * * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before * call, we can identify this case by checking (!dst->anon_vma && * src->anon_vma). * * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find * and reuse existing anon_vma which has no vmas and only one child anon_vma. * This prevents degradation of anon_vma hierarchy to endless linear chain in * case of constantly forking task. On the other hand, an anon_vma with more * than one child isn't reused even if there was no alive vma, thus rmap * walker has a good chance of avoiding scanning the whole hierarchy when it * searches where page is mapped. */ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; struct anon_vma *root = NULL; list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { struct anon_vma *anon_vma; avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); if (unlikely(!avc)) { unlock_anon_vma_root(root); root = NULL; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto enomem_failure; } anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); /* * Reuse existing anon_vma if it has no vma and only one * anon_vma child. * * Root anon_vma is never reused: * it has self-parent reference and at least one child. */ if (!dst->anon_vma && src->anon_vma && anon_vma->num_children < 2 && anon_vma->num_active_vmas == 0) dst->anon_vma = anon_vma; } if (dst->anon_vma) dst->anon_vma->num_active_vmas++; unlock_anon_vma_root(root); return 0; enomem_failure: /* * dst->anon_vma is dropped here otherwise its num_active_vmas can * be incorrectly decremented in unlink_anon_vmas(). * We can safely do this because callers of anon_vma_clone() don't care * about dst->anon_vma if anon_vma_clone() failed. */ dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } /* * Attach vma to its own anon_vma, as well as to the anon_vmas that * the corresponding VMA in the parent process is attached to. * Returns 0 on success, non-zero on failure. */ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) { struct anon_vma_chain *avc; struct anon_vma *anon_vma; int error; /* Don't bother if the parent process has no anon_vma here. */ if (!pvma->anon_vma) return 0; /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ vma->anon_vma = NULL; /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. */ error = anon_vma_clone(vma, pvma); if (error) return error; /* An existing anon_vma has been reused, all done then. */ if (vma->anon_vma) return 0; /* Then add our own anon_vma. */ anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; anon_vma->num_active_vmas++; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_error_free_anon_vma; /* * The root anon_vma's rwsem is the lock actually used when we * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until * this anon_vma is freed, because the lock lives in the root. 
*/ get_anon_vma(anon_vma->root); /* Mark this anon_vma as the one where our new (COWed) pages go. */ vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); anon_vma->parent->num_children++; anon_vma_unlock_write(anon_vma); return 0; out_error_free_anon_vma: put_anon_vma(anon_vma); out_error: unlink_anon_vmas(vma); return -ENOMEM; } void unlink_anon_vmas(struct vm_area_struct *vma) { struct anon_vma_chain *avc, *next; struct anon_vma *root = NULL; /* * Unlink each anon_vma chained to the VMA. This list is ordered * from newest to oldest, ensuring the root anon_vma gets freed last. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); /* * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { anon_vma->parent->num_children--; continue; } list_del(&avc->same_vma); anon_vma_chain_free(avc); } if (vma->anon_vma) { vma->anon_vma->num_active_vmas--; /* * vma would still be needed after unlink, and anon_vma will be prepared * when handle fault. */ vma->anon_vma = NULL; } unlock_anon_vma_root(root); /* * Iterate the list once more, it now only contains empty and unlinked * anon_vmas, destroy them. Could not do before due to __put_anon_vma() * needing to write-acquire the anon_vma->root->rwsem. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; VM_WARN_ON(anon_vma->num_children); VM_WARN_ON(anon_vma->num_active_vmas); put_anon_vma(anon_vma); list_del(&avc->same_vma); anon_vma_chain_free(avc); } } static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); anon_vma->rb_root = RB_ROOT_CACHED; } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, anon_vma_ctor); anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC|SLAB_ACCOUNT); } /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * * Since there is no serialization what so ever against folio_remove_rmap_*() * the best this function can do is return a refcount increased anon_vma * that might have been relevant to this page. * * The page might have been remapped to a different anon_vma or the anon_vma * returned may already be freed (and even reused). * * In case it was remapped to a different anon_vma, the new anon_vma will be a * child of the old anon_vma, and the anon_vma lifetime rules will therefore * ensure that any anon_vma obtained from the page will still be valid for as * long as we observe page_mapped() [ hence all those page_mapped() tests ]. * * All users of this function must be very careful when walking the anon_vma * chain and verify that the page in question is indeed mapped in it * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid * if there is a mapcount, we can dereference the anon_vma after observing * those. * * NOTE: the caller should normally hold folio lock when calling this. 
If * not, the caller needs to double check the anon_vma didn't change after * taking the anon_vma lock for either read or write (UFFDIO_MOVE can modify it * concurrently without folio lock protection). See folio_lock_anon_vma_read() * which has already covered that, and comment above remap_pages(). */ struct anon_vma *folio_get_anon_vma(struct folio *folio) { struct anon_vma *anon_vma = NULL; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } /* * If this folio is still mapped, then its anon_vma cannot have been * freed. But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). */ if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } out: rcu_read_unlock(); return anon_vma; } /* * Similar to folio_get_anon_vma() except it locks the anon_vma. * * Its a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a * reference like with folio_get_anon_vma() and then block on the mutex * on !rwc->try_lock case. */ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; retry: rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(folio->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!folio_mapped(folio)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* * folio_move_anon_rmap() might have changed the anon_vma as we * might not hold the folio lock here. */ if (unlikely((unsigned long)READ_ONCE(folio->mapping) != anon_mapping)) { up_read(&root_anon_vma->rwsem); rcu_read_unlock(); goto retry; } /* * If the folio is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ if (!folio_mapped(folio)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } goto out; } if (rwc && rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } /* trylock failed, we got to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } if (!folio_mapped(folio)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } /* we pinned the anon_vma, its safe to sleep */ rcu_read_unlock(); anon_vma_lock_read(anon_vma); /* * folio_move_anon_rmap() might have changed the anon_vma as we might * not hold the folio lock here. */ if (unlikely((unsigned long)READ_ONCE(folio->mapping) != anon_mapping)) { anon_vma_unlock_read(anon_vma); put_anon_vma(anon_vma); anon_vma = NULL; goto retry; } if (atomic_dec_and_test(&anon_vma->refcount)) { /* * Oops, we held the last refcount, release the lock * and bail -- can't simply use put_anon_vma() because * we'll deadlock on the anon_vma_lock_write() recursion. 
*/ anon_vma_unlock_read(anon_vma); __put_anon_vma(anon_vma); anon_vma = NULL; } return anon_vma; out: rcu_read_unlock(); return anon_vma; } #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * Flush TLB entries for recently unmapped pages from remote CPUs. It is * important if a PTE was dirty when it was unmapped that it's flushed * before any IO is initiated on the page to prevent lost writes. Similarly, * it must be flushed before freeing to prevent data leakage. */ void try_to_unmap_flush(void) { struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; if (!tlb_ubc->flush_required) return; arch_tlbbatch_flush(&tlb_ubc->arch); tlb_ubc->flush_required = false; tlb_ubc->writable = false; } /* Flush iff there are potentially writable TLB entries that can race with IO */ void try_to_unmap_flush_dirty(void) { struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; if (tlb_ubc->writable) try_to_unmap_flush(); } /* * Bits 0-14 of mm->tlb_flush_batched record pending generations. * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. */ #define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 #define TLB_FLUSH_BATCH_PENDING_MASK \ ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) #define TLB_FLUSH_BATCH_PENDING_LARGE \ (TLB_FLUSH_BATCH_PENDING_MASK / 2) static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, unsigned long uaddr) { struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; int batch; bool writable = pte_dirty(pteval); if (!pte_accessible(mm, pteval)) return; arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, uaddr); tlb_ubc->flush_required = true; /* * Ensure compiler does not re-order the setting of tlb_flush_batched * before the PTE is cleared. */ barrier(); batch = atomic_read(&mm->tlb_flush_batched); retry: if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { /* * Prevent `pending' from catching up with `flushed' because of * overflow. Reset `pending' and `flushed' to be 1 and 0 if * `pending' becomes large. */ if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) goto retry; } else { atomic_inc(&mm->tlb_flush_batched); } /* * If the PTE was dirty then it's best to assume it's writable. The * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() * before the page is queued for IO. */ if (writable) tlb_ubc->writable = true; } /* * Returns true if the TLB flush should be deferred to the end of a batch of * unmap operations to reduce IPIs. */ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { if (!(flags & TTU_BATCH_FLUSH)) return false; return arch_tlbbatch_should_defer(mm); } /* * Reclaim unmaps pages under the PTL but do not flush the TLB prior to * releasing the PTL if TLB flushes are batched. It's possible for a parallel * operation such as mprotect or munmap to race between reclaim unmapping * the page and flushing the page. If this race occurs, it potentially allows * access to data via a stale TLB entry. Tracking all mm's that have TLB * batching in flight would be expensive during reclaim so instead track * whether TLB batching occurred in the past and if so then do a flush here * if required. This will cost one additional flush per reclaim cycle paid * by the first operation at risk such as mprotect and mumap. * * This must be called under the PTL so that an access to tlb_flush_batched * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise * via the PTL. 
*/ void flush_tlb_batched_pending(struct mm_struct *mm) { int batch = atomic_read(&mm->tlb_flush_batched); int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; if (pending != flushed) { arch_flush_tlb_batched_pending(mm); /* * If the new TLB flushing is pending during flushing, leave * mm->tlb_flush_batched as is, to avoid losing flushing. */ atomic_cmpxchg(&mm->tlb_flush_batched, batch, pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); } } #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, unsigned long uaddr) { } static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { return false; } #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ /* * At what user virtual address is page expected in vma? * Caller should check the page is actually part of the vma. */ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { struct folio *folio = page_folio(page); if (folio_test_anon(folio)) { struct anon_vma *page__anon_vma = folio_anon_vma(folio); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. */ if (!vma->anon_vma || !page__anon_vma || vma->anon_vma->root != page__anon_vma->root) return -EFAULT; } else if (!vma->vm_file) { return -EFAULT; } else if (vma->vm_file->f_mapping != folio->mapping) { return -EFAULT; } return vma_address(page, vma); } /* * Returns the actual pmd_t* where we expect 'address' to be mapped from, or * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* * represents. */ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; p4d = p4d_offset(pgd, address); if (!p4d_present(*p4d)) goto out; pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); out: return pmd; } struct folio_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; /* * arg: folio_referenced_arg will be passed */ static bool folio_referenced_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; unsigned long start = address, ptes = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; if (vma->vm_flags & VM_LOCKED) { if (!folio_test_large(folio) || !pvmw.pte) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma); page_vma_mapped_walk_done(&pvmw); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } /* * For large folio fully mapped to VMA, will * be handled after the pvmw loop. * * For large folio cross VMA boundaries, it's * expected to be picked by page reclaim. But * should skip reference of pages which are in * the range of VM_LOCKED vma. As page reclaim * should just count the reference of pages out * the range of VM_LOCKED vma. */ ptes++; pra->mapcount--; continue; } if (pvmw.pte) { if (lru_gen_enabled() && pte_young(ptep_get(pvmw.pte))) { lru_gen_look_around(&pvmw); referenced++; } if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) referenced++; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd)) referenced++; } else { /* unexpected pmd-mapped folio? 
*/ WARN_ON_ONCE(1); } pra->mapcount--; } if ((vma->vm_flags & VM_LOCKED) && folio_test_large(folio) && folio_within_vma(folio, vma)) { unsigned long s_align, e_align; s_align = ALIGN_DOWN(start, PMD_SIZE); e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); /* folio doesn't cross page table boundary and fully mapped */ if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { /* Restore the mlock which got missed */ mlock_vma_folio(folio, vma); pra->vm_flags |= VM_LOCKED; return false; /* To break the loop */ } } if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) referenced++; if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; } if (!pra->mapcount) return false; /* To break the loop */ return true; } static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) { struct folio_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; /* * Ignore references from this mapping if it has no recency. If the * folio has been used in another mapping, we will catch it; if this * other mapping is already gone, the unmap path will have set the * referenced flag or activated the folio in zap_pte_range(). */ if (!vma_has_recency(vma)) return true; /* * If we are reclaiming on behalf of a cgroup, skip counting on behalf * of references from different cgroups. */ if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) return true; return false; } /** * folio_referenced() - Test if the folio was referenced. * @folio: The folio to test. * @is_locked: Caller holds lock on the folio. * @memcg: target memory cgroup * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * * Quick test_and_clear_referenced for all mappings of a folio, * * Return: The number of mappings which referenced the folio. Return -1 if * the function bailed out due to rmap lock contention. */ int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { int we_locked = 0; struct folio_referenced_arg pra = { .mapcount = folio_mapcount(folio), .memcg = memcg, }; struct rmap_walk_control rwc = { .rmap_one = folio_referenced_one, .arg = (void *)&pra, .anon_lock = folio_lock_anon_vma_read, .try_lock = true, .invalid_vma = invalid_folio_referenced_vma, }; *vm_flags = 0; if (!pra.mapcount) return 0; if (!folio_raw_mapping(folio)) return 0; if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) { we_locked = folio_trylock(folio); if (!we_locked) return 1; } rmap_walk(folio, &rwc); *vm_flags = pra.vm_flags; if (we_locked) folio_unlock(folio); return rwc.contended ? -1 : pra.referenced; } static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) { int cleaned = 0; struct vm_area_struct *vma = pvmw->vma; struct mmu_notifier_range range; unsigned long address = pvmw->address; /* * We have to assume the worse case ie pmd for invalidation. Note that * the folio can not be freed from this function. 
*/ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, vma->vm_mm, address, vma_address_end(pvmw)); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(pvmw)) { int ret = 0; address = pvmw->address; if (pvmw->pte) { pte_t *pte = pvmw->pte; pte_t entry = ptep_get(pte); if (!pte_dirty(entry) && !pte_write(entry)) continue; flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(vma->vm_mm, address, pte, entry); ret = 1; } else { #ifdef CONFIG_TRANSPARENT_HUGEPAGE pmd_t *pmd = pvmw->pmd; pmd_t entry; if (!pmd_dirty(*pmd) && !pmd_write(*pmd)) continue; flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); entry = pmdp_invalidate(vma, address, pmd); entry = pmd_wrprotect(entry); entry = pmd_mkclean(entry); set_pmd_at(vma->vm_mm, address, pmd, entry); ret = 1; #else /* unexpected pmd-mapped folio? */ WARN_ON_ONCE(1); #endif } if (ret) cleaned++; } mmu_notifier_invalidate_range_end(&range); return cleaned; } static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); int *cleaned = arg; *cleaned += page_vma_mkclean_one(&pvmw); return true; } static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) return false; return true; } int folio_mkclean(struct folio *folio) { int cleaned = 0; struct address_space *mapping; struct rmap_walk_control rwc = { .arg = (void *)&cleaned, .rmap_one = page_mkclean_one, .invalid_vma = invalid_mkclean_vma, }; BUG_ON(!folio_test_locked(folio)); if (!folio_mapped(folio)) return 0; mapping = folio_mapping(folio); if (!mapping) return 0; rmap_walk(folio, &rwc); return cleaned; } EXPORT_SYMBOL_GPL(folio_mkclean); /** * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) * within the @vma of shared mappings. And since clean PTEs * should also be readonly, write protects them too. * @pfn: start pfn. * @nr_pages: number of physically contiguous pages srarting with @pfn. * @pgoff: page offset that the @pfn mapped with. * @vma: vma that @pfn mapped within. * * Returns the number of cleaned PTEs (including PMDs). */ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma) { struct page_vma_mapped_walk pvmw = { .pfn = pfn, .nr_pages = nr_pages, .pgoff = pgoff, .vma = vma, .flags = PVMW_SYNC, }; if (invalid_mkclean_vma(vma, NULL)) return 0; pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma); VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); return page_vma_mkclean_one(&pvmw); } int folio_total_mapcount(struct folio *folio) { int mapcount = folio_entire_mapcount(folio); int nr_pages; int i; /* In the common case, avoid the loop when no pages mapped by PTE */ if (folio_nr_pages_mapped(folio) == 0) return mapcount; /* * Add all the PTE mappings of those pages mapped by PTE. * Limit the loop to folio_nr_pages_mapped()? * Perhaps: given all the raciness, that may be a good or a bad idea. 
*/ nr_pages = folio_nr_pages(folio); for (i = 0; i < nr_pages; i++) mapcount += atomic_read(&folio_page(folio, i)->_mapcount); /* But each of those _mapcounts was based on -1 */ mapcount += nr_pages; return mapcount; } static __always_inline unsigned int __folio_add_rmap(struct folio *folio, struct page *page, int nr_pages, enum rmap_level level, int *nr_pmdmapped) { atomic_t *mapped = &folio->_nr_pages_mapped; int first, nr = 0; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: do { first = atomic_inc_and_test(&page->_mapcount); if (first && folio_test_large(folio)) { first = atomic_inc_return_relaxed(mapped); first = (first < ENTIRELY_MAPPED); } if (first) nr++; } while (page++, --nr_pages > 0); break; case RMAP_LEVEL_PMD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { *nr_pmdmapped = folio_nr_pages(folio); nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? */ if (unlikely(nr < 0)) nr = 0; } else { /* Raced ahead of a remove of ENTIRELY_MAPPED */ nr = 0; } } break; } return nr; } /** * folio_move_anon_rmap - move a folio to our anon_vma * @folio: The folio to move to our anon_vma * @vma: The vma the folio belongs to * * When a folio belongs exclusively to one process after a COW event, * that folio can be moved into the anon_vma that belongs to just that * process, so the rmap code will not search the parent or sibling processes. */ void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); anon_vma += PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written * simultaneously, so a concurrent reader (eg folio_referenced()'s * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); } /** * __folio_set_anon - set up a new anonymous rmap for a folio * @folio: The folio to set up the new anonymous rmap for. * @vma: VM area to add the folio to. * @address: User virtual address of the mapping * @exclusive: Whether the folio is exclusive to the process. */ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); /* * If the folio isn't exclusive to this vma, we must use the _oldest_ * possible anon_vma for the folio mapping! */ if (!exclusive) anon_vma = anon_vma->root; /* * page_idle does a lockless/optimistic rmap scan on folio->mapping. * Make sure the compiler doesn't split the stores of anon_vma and * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code * could mistake the mapping for a struct address_space and crash. */ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); } /** * __page_check_anon_rmap - sanity check anonymous rmap addition * @folio: The folio containing @page. 
* @page: the page to check the mapping of * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */ static void __page_check_anon_rmap(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address) { /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * * We have exclusion against folio_add_anon_rmap_*() because the caller * always holds the page locked. * * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked * over the call to folio_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), page); } static __always_inline void __folio_add_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags, enum rmap_level level) { int i, nr, nr_pmdmapped = 0; nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); if (unlikely(!folio_test_anon(folio))) { VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); /* * For a PTE-mapped large folio, we only know that the single * PTE is exclusive. Further, __folio_set_anon() might not get * folio->index right when not given the address of the head * page. */ VM_WARN_ON_FOLIO(folio_test_large(folio) && level != RMAP_LEVEL_PMD, folio); __folio_set_anon(folio, vma, address, !!(flags & RMAP_EXCLUSIVE)); } else if (likely(!folio_test_ksm(folio))) { __page_check_anon_rmap(folio, page, vma, address); } if (flags & RMAP_EXCLUSIVE) { switch (level) { case RMAP_LEVEL_PTE: for (i = 0; i < nr_pages; i++) SetPageAnonExclusive(page + i); break; case RMAP_LEVEL_PMD: SetPageAnonExclusive(page); break; } } for (i = 0; i < nr_pages; i++) { struct page *cur_page = page + i; /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && PageAnonExclusive(cur_page), folio); } /* * For large folio, only mlock it if it's fully mapped to VMA. It's * not easy to check whether the large folio is fully mapped to VMA * here. Only mlock normal 4K folio and leave page reclaim to handle * large folio. */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages which will be mapped * @vma: The vm area in which the mappings are added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + nr_pages) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, * and to ensure that an anon folio is not being upgraded racily to a KSM folio * (but KSM folios are never downgraded). 
*/ void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, RMAP_LEVEL_PTE); } /** * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * @address: The user virtual address of the first page to map * @flags: The rmap flags * * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting. */ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /** * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. * The folio does not have to be locked. * * If the folio is pmd-mappable, it is accounted as a THP. As the folio * is new, it's assumed to be mapped exclusively by a single process. */ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { int nr = folio_nr_pages(folio); VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_BUG_ON_VMA(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end, vma); __folio_set_swapbacked(folio); __folio_set_anon(folio, vma, address, true); if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); SetPageAnonExclusive(&folio->page); } else if (!folio_test_pmd_mappable(folio)) { int i; for (i = 0; i < nr; i++) { struct page *page = folio_page(folio, i); /* increment count (starts at -1) */ atomic_set(&page->_mapcount, 0); SetPageAnonExclusive(page); } atomic_set(&folio->_nr_pages_mapped, nr); } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); SetPageAnonExclusive(&folio->page); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); } static __always_inline void __folio_add_file_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { int nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? 
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); /* See comments in folio_add_anon_rmap_*() */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } /** * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio * @folio: The folio to add the mappings to * @page: The first page to add * @nr_pages: The number of pages that will be mapped using PTEs * @vma: The vm area in which the mappings are added * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma) { __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio * @folio: The folio to add the mapping to * @page: The first page to add * @vma: The vm area in which the mapping is added * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. */ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { atomic_t *mapped = &folio->_nr_pages_mapped; int last, nr = 0, nr_pmdmapped = 0; enum node_stat_item idx; __folio_rmap_sanity_checks(folio, page, nr_pages, level); switch (level) { case RMAP_LEVEL_PTE: do { last = atomic_add_negative(-1, &page->_mapcount); if (last && folio_test_large(folio)) { last = atomic_dec_return_relaxed(mapped); last = (last < ENTIRELY_MAPPED); } if (last) nr++; } while (page++, --nr_pages > 0); break; case RMAP_LEVEL_PMD: last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); if (likely(nr < ENTIRELY_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } } break; } if (nr_pmdmapped) { if (folio_test_anon(folio)) idx = NR_ANON_THPS; else if (folio_test_swapbacked(folio)) idx = NR_SHMEM_PMDMAPPED; else idx = NR_FILE_PMDMAPPED; __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); } if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; __lruvec_stat_mod_folio(folio, idx, -nr); /* * Queue anon large folio for deferred split if at least one * page of the folio is unmapped and at least one page * is still mapped. */ if (folio_test_large(folio) && folio_test_anon(folio)) if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped) deferred_split_folio(folio); } /* * It would be tidy to reset folio_test_anon mapping when fully * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. 
*/ munlock_vma_folio(folio, vma); } /** * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio * @folio: The folio to remove the mappings from * @page: The first page to remove * @nr_pages: The number of pages that will be removed from the mapping * @vma: The vm area from which the mappings are removed * * The page range of the folio is defined by [page, page + nr_pages) * * The caller needs to hold the page table lock. */ void folio_remove_rmap_ptes(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma) { __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); } /** * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio * @folio: The folio to remove the mapping from * @page: The first page to remove * @vma: The vm area from which the mapping is removed * * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * * The caller needs to hold the page table lock. */ void folio_remove_rmap_pmd(struct folio *folio, struct page *page, struct vm_area_struct *vma) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); #else WARN_ON_ONCE(true); #endif } /* * @arg: enum ttu_flags will be passed to this argument */ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; unsigned long hsz = 0; /* * When racing against e.g. zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_unmap() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; if (flags & TTU_SPLIT_HUGE_PMD) split_huge_pmd_address(vma, address, false, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the folio can not be freed in this function as call of * try_to_unmap() must hold a reference on the folio. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* We need the huge page size for set_huge_pte_at() */ hsz = huge_page_size(hstate_vma(vma)); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); /* * If the folio is in an mlock()d vma, we must not swap it out. 
*/ if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); page_vma_mapped_walk_done(&pvmw); ret = false; break; } pfn = pte_pfn(ptep_get(pvmw.pte)); subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * The try_to_unmap() is only passed a hugetlb page * in the case where the hugetlb page is poisoned. */ VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); /* * huge_pmd_unshare may unmap an entire PMD page. * There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and fail * if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) { page_vma_mapped_walk_done(&pvmw); ret = false; break; } if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. */ page_vma_mapped_walk_done(&pvmw); break; } hugetlb_vma_unlock_write(vma); } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* * Now the pte is cleared. If this pte was uffd-wp armed, * we may want to replace a none pte with a marker pte if * it's file-backed, so we don't lose the tracking info. */ pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. 
When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(&folio->page)); } else if (folio_test_anon(folio)) { swp_entry_t entry = page_swap_entry(subpage); pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ if (unlikely(folio_test_swapbacked(folio) != folio_test_swapcache(folio))) { WARN_ON_ONCE(1); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* MADV_FREE page check */ if (!folio_test_swapbacked(folio)) { int ref_count, map_count; /* * Synchronize with gup_pte_range(): * - clear PTE; barrier; read refcount * - inc refcount; barrier; read PTE */ smp_mb(); ref_count = folio_ref_count(folio); map_count = folio_mapcount(folio); /* * Order reads for page refcount and dirty flag * (see comments in __remove_mapping()). */ smp_rmb(); /* * The only page refs must be one from isolation * plus the rmap(s) (dropped by discard:). */ if (ref_count == 1 + map_count && !folio_test_dirty(folio)) { dec_mm_counter(mm, MM_ANONPAGES); goto discard; } /* * If the folio was redirtied, it cannot be * discarded. Remap the page to page table. */ set_pte_at(mm, address, pvmw.pte, pteval); folio_set_swapbacked(folio); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (arch_unmap_one(mm, vma, address, pteval) < 0) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* See folio_try_share_anon_rmap(): clear PTE first. */ if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, subpage)) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } dec_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_SWAPENTS); swp_pte = swp_entry_to_pte(entry); if (anon_exclusive) swp_pte = pte_swp_mkexclusive(swp_pte); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); } else { /* * This is a locked file-backed folio, * so it cannot be removed from the page * cache and replaced by a new folio before * mmu_notifier_invalidate_range_end, so no * concurrent thread might update its page table * to point at a new folio while a device is * still using this folio. * * See Documentation/mm/mmu_notifier.rst */ dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); } mmu_notifier_invalidate_range_end(&range); return ret; } static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) { return vma_is_temporary_stack(vma); } static int folio_not_mapped(struct folio *folio) { return !folio_mapped(folio); } /** * try_to_unmap - Try to remove all page table mappings to a folio. * @folio: The folio to unmap. * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * folio. 
It is the caller's responsibility to check if the folio is * still mapped if needed (use TTU_SYNC to prevent accounting races). * * Context: Caller must hold the folio lock. */ void try_to_unmap(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } /* * @arg: enum ttu_flags will be passed to this argument. * * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs * containing migration entries. */ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); pte_t pteval; struct page *subpage; bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; unsigned long pfn; unsigned long hsz = 0; /* * When racing against e.g. zap_pte_range() on another cpu, * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_migrate() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ if (flags & TTU_SYNC) pvmw.flags = PVMW_SYNC; /* * unmap_page() in mm/huge_memory.c is the only user of migration with * TTU_SPLIT_HUGE_PMD and it wants to freeze. */ if (flags & TTU_SPLIT_HUGE_PMD) split_huge_pmd_address(vma, address, true, folio); /* * For THP, we have to assume the worse case ie pmd for invalidation. * For hugetlb, it could be much worse if we need to do pud * invalidation in the case of pmd sharing. * * Note that the page can not be free in this function as call of * try_to_unmap() must hold a reference on the page. */ range.end = vma_address_end(&pvmw); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, range.end); if (folio_test_hugetlb(folio)) { /* * If sharing is possible, start and end will be adjusted * accordingly. */ adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); /* We need the huge page size for set_huge_pte_at() */ hsz = huge_page_size(hstate_vma(vma)); } mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION /* PMD-mapped THP migration entry */ if (!pvmw.pte) { subpage = folio_page(folio, pmd_pfn(*pvmw.pmd) - folio_pfn(folio)); VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || !folio_test_pmd_mappable(folio), folio); if (set_pmd_migration_entry(&pvmw, subpage)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } continue; } #endif /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); pfn = pte_pfn(ptep_get(pvmw.pte)); if (folio_is_zone_device(folio)) { /* * Our PTE is a non-present device exclusive entry and * calculating the subpage as for the common case would * result in an invalid pointer. * * Since only PAGE_SIZE pages can currently be * migrated, just set it to page. This will need to be * changed when hugepage migrations to device private * memory are supported. */ VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); subpage = &folio->page; } else { subpage = folio_page(folio, pfn - folio_pfn(folio)); } address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); if (folio_test_hugetlb(folio)) { bool anon = folio_test_anon(folio); /* * huge_pmd_unshare may unmap an entire PMD page. 
* There is no way of knowing exactly which PMDs may * be cached for this mm, so we must flush them all. * start/end were already adjusted above to cover this * range. */ flush_cache_range(vma, range.start, range.end); /* * To call huge_pmd_unshare, i_mmap_rwsem must be * held in write mode. Caller needs to explicitly * do this outside rmap routines. * * We also must hold hugetlb vma_lock in write mode. * Lock order dictates acquiring vma_lock BEFORE * i_mmap_rwsem. We can only try lock here and * fail if unsuccessful. */ if (!anon) { VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); if (!hugetlb_vma_trylock_write(vma)) { page_vma_mapped_walk_done(&pvmw); ret = false; break; } if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { hugetlb_vma_unlock_write(vma); flush_tlb_range(vma, range.start, range.end); /* * The ref count of the PMD page was * dropped which is part of the way map * counting is done for shared PMDs. * Return 'true' here. When there is * no other sharing, huge_pmd_unshare * returns false and we will unmap the * actual page and drop map count * to zero. */ page_vma_mapped_walk_done(&pvmw); break; } hugetlb_vma_unlock_write(vma); } /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially * a remote CPU could still be writing to the folio. * If the entry was previously clean then the * architecture must guarantee that a clear->dirty * transition on a cached TLB entry is written through * and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pvmw.pte); set_tlb_ubc_flush_pending(mm, pteval, address); } else { pteval = ptep_clear_flush(vma, address, pvmw.pte); } } /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (folio_is_device_private(folio)) { unsigned long pfn = folio_pfn(folio); swp_entry_t entry; pte_t swp_pte; if (anon_exclusive) WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, subpage)); /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ entry = pte_to_swp_entry(pteval); if (is_writable_device_private_entry(entry)) entry = make_writable_migration_entry(pfn); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry(pfn); else entry = make_readable_migration_entry(pfn); swp_pte = swp_entry_to_pte(entry); /* * pteval maps a zone device page and is therefore * a swap pte. */ if (pte_swp_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_swp_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); trace_set_migration_pte(pvmw.address, pte_val(swp_pte), compound_order(&folio->page)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } else if (PageHWPoison(subpage)) { pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); if (folio_test_hugetlb(folio)) { hugetlb_count_sub(folio_nr_pages(folio), mm); set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); } else { dec_mm_counter(mm, mm_counter(&folio->page)); set_pte_at(mm, address, pvmw.pte, pteval); } } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { /* * The guest indicated that the page content is of no * interest anymore. 
Simply discard the pte, vmscan * will take care of the rest. * A future reference will then fault in a new zero * page. When userfaultfd is active, we must not drop * this page though, as its main user (postcopy * migration) will not expect userfaults on already * copied pages. */ dec_mm_counter(mm, mm_counter(&folio->page)); } else { swp_entry_t entry; pte_t swp_pte; if (arch_unmap_one(mm, vma, address, pteval) < 0) { if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); else set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && !anon_exclusive, subpage); /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (folio_test_hugetlb(folio)) { if (anon_exclusive && hugetlb_try_share_anon_rmap(folio)) { set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); ret = false; page_vma_mapped_walk_done(&pvmw); break; } } else if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, subpage)) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; } /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ if (pte_write(pteval)) entry = make_writable_migration_entry( page_to_pfn(subpage)); else if (anon_exclusive) entry = make_readable_exclusive_migration_entry( page_to_pfn(subpage)); else entry = make_readable_migration_entry( page_to_pfn(subpage)); if (pte_young(pteval)) entry = make_migration_entry_young(entry); if (pte_dirty(pteval)) entry = make_migration_entry_dirty(entry); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); if (folio_test_hugetlb(folio)) set_huge_pte_at(mm, address, pvmw.pte, swp_pte, hsz); else set_pte_at(mm, address, pvmw.pte, swp_pte); trace_set_migration_pte(address, pte_val(swp_pte), compound_order(&folio->page)); /* * No need to invalidate here it will synchronize on * against the special swap migration pte. */ } if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * try_to_migrate - try to replace all page table mappings with swap entries * @folio: the folio to replace page table entries for * @flags: action and flags * * Tries to remove all the page table entries which are mapping this folio and * replace them with special swap entries. Caller must hold the folio lock. */ void try_to_migrate(struct folio *folio, enum ttu_flags flags) { struct rmap_walk_control rwc = { .rmap_one = try_to_migrate_one, .arg = (void *)flags, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, }; /* * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. */ if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC | TTU_BATCH_FLUSH))) return; if (folio_is_zone_device(folio) && (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) return; /* * During exec, a temporary VMA is setup and later moved. * The VMA is moved under the anon_vma lock but not the * page tables leading to a race where migration cannot * find the migration ptes. 
Rather than increasing the * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. */ if (!folio_test_ksm(folio) && folio_test_anon(folio)) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) rmap_walk_locked(folio, &rwc); else rmap_walk(folio, &rwc); } #ifdef CONFIG_DEVICE_PRIVATE struct make_exclusive_args { struct mm_struct *mm; unsigned long address; void *owner; bool valid; }; static bool page_make_device_exclusive_one(struct folio *folio, struct vm_area_struct *vma, unsigned long address, void *priv) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); struct make_exclusive_args *args = priv; pte_t pteval; struct page *subpage; bool ret = true; struct mmu_notifier_range range; swp_entry_t entry; pte_t swp_pte; pte_t ptent; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, address, min(vma->vm_end, address + folio_size(folio)), args->owner); mmu_notifier_invalidate_range_start(&range); while (page_vma_mapped_walk(&pvmw)) { /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); ptent = ptep_get(pvmw.pte); if (!pte_present(ptent)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } subpage = folio_page(folio, pte_pfn(ptent) - folio_pfn(folio)); address = pvmw.address; /* Nuke the page table entry. */ flush_cache_page(vma, address, pte_pfn(ptent)); pteval = ptep_clear_flush(vma, address, pvmw.pte); /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pteval)) folio_mark_dirty(folio); /* * Check that our target page is still mapped at the expected * address. */ if (args->mm == mm && args->address == address && pte_write(pteval)) args->valid = true; /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ if (pte_write(pteval)) entry = make_writable_device_exclusive_entry( page_to_pfn(subpage)); else entry = make_readable_device_exclusive_entry( page_to_pfn(subpage)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); if (pte_uffd_wp(pteval)) swp_pte = pte_swp_mkuffd_wp(swp_pte); set_pte_at(mm, address, pvmw.pte, swp_pte); /* * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ folio_remove_rmap_pte(folio, subpage, vma); } mmu_notifier_invalidate_range_end(&range); return ret; } /** * folio_make_device_exclusive - Mark the folio exclusively owned by a device. * @folio: The folio to replace page table entries for. * @mm: The mm_struct where the folio is expected to be mapped. * @address: Address where the folio is expected to be mapped. * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks * * Tries to remove all the page table entries which are mapping this * folio and replace them with special device exclusive swap entries to * grant a device exclusive access to the folio. * * Context: Caller must hold the folio lock. * Return: false if the page is still mapped, or if it could not be unmapped * from the expected address. Otherwise returns true (success). 
*/ static bool folio_make_device_exclusive(struct folio *folio, struct mm_struct *mm, unsigned long address, void *owner) { struct make_exclusive_args args = { .mm = mm, .address = address, .owner = owner, .valid = false, }; struct rmap_walk_control rwc = { .rmap_one = page_make_device_exclusive_one, .done = folio_not_mapped, .anon_lock = folio_lock_anon_vma_read, .arg = &args, }; /* * Restrict to anonymous folios for now to avoid potential writeback * issues. */ if (!folio_test_anon(folio)) return false; rmap_walk(folio, &rwc); return args.valid && !folio_mapcount(folio); } /** * make_device_exclusive_range() - Mark a range for exclusive use by a device * @mm: mm_struct of associated target process * @start: start of the region to mark for exclusive device access * @end: end address of region * @pages: returns the pages which were successfully marked for exclusive access * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering * * Returns: number of pages found in the range by GUP. A page is marked for * exclusive access only if the page pointer is non-NULL. * * This function finds ptes mapping page(s) to the given address range, locks * them and replaces mappings with special swap entries preventing userspace CPU * access. On fault these entries are replaced with the original mapping after * calling MMU notifiers. * * A driver using this to program access from a device must use a mmu notifier * critical section to hold a device specific lock during programming. Once * programming is complete it should drop the page lock and reference after * which point CPU access to the page will revoke the exclusive access. */ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct page **pages, void *owner) { long npages = (end - start) >> PAGE_SHIFT; long i; npages = get_user_pages_remote(mm, start, npages, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, pages, NULL); if (npages < 0) return npages; for (i = 0; i < npages; i++, start += PAGE_SIZE) { struct folio *folio = page_folio(pages[i]); if (PageTail(pages[i]) || !folio_trylock(folio)) { folio_put(folio); pages[i] = NULL; continue; } if (!folio_make_device_exclusive(folio, mm, start, owner)) { folio_unlock(folio); folio_put(folio); pages[i] = NULL; } } return npages; } EXPORT_SYMBOL_GPL(make_device_exclusive_range); #endif void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); } static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; if (rwc->anon_lock) return rwc->anon_lock(folio, rwc); /* * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_lock. 
Users without mmap_lock are required to * take a reference count to prevent the anon_vma disappearing */ anon_vma = folio_anon_vma(folio); if (!anon_vma) return NULL; if (anon_vma_trylock_read(anon_vma)) goto out; if (rwc->try_lock) { anon_vma = NULL; rwc->contended = true; goto out; } anon_vma_lock_read(anon_vma); out: return anon_vma; } /* * rmap_walk_anon - do something to anonymous page using the object-based * rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma * chains contained in the anon_vma struct it points to. */ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; if (locked) { anon_vma = folio_anon_vma(folio); /* anon_vma disappear under us? */ VM_BUG_ON_FOLIO(!anon_vma, folio); } else { anon_vma = rmap_walk_anon_lock(folio, rwc); } if (!anon_vma) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) break; if (rwc->done && rwc->done(folio)) break; } if (!locked) anon_vma_unlock_read(anon_vma); } /* * rmap_walk_file - do something to file page using the object-based rmap method * @folio: the folio to be handled * @rwc: control variable according to each walk type * @locked: caller holds relevant rmap lock * * Find all the mappings of a folio using the mapping pointer and the vma chains * contained in the address_space struct it points to. */ static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc, bool locked) { struct address_space *mapping = folio_mapping(folio); pgoff_t pgoff_start, pgoff_end; struct vm_area_struct *vma; /* * The page lock not only makes sure that page->mapping cannot * suddenly be NULLified by truncation, it makes sure that the * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_rwsem. 
*/ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!mapping) return; pgoff_start = folio_pgoff(folio); pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; if (!locked) { if (i_mmap_trylock_read(mapping)) goto lookup; if (rwc->try_lock) { rwc->contended = true; return; } i_mmap_lock_read(mapping); } lookup: vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff_start, pgoff_end) { unsigned long address = vma_address(&folio->page, vma); VM_BUG_ON_VMA(address == -EFAULT, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, address, rwc->arg)) goto done; if (rwc->done && rwc->done(folio)) goto done; } done: if (!locked) i_mmap_unlock_read(mapping); } void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { if (unlikely(folio_test_ksm(folio))) rmap_walk_ksm(folio, rwc); else if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, false); else rmap_walk_file(folio, rwc, false); } /* Like rmap_walk, but caller holds relevant rmap lock */ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); if (folio_test_anon(folio)) rmap_walk_anon(folio, rwc, true); else rmap_walk_file(folio, rwc, true); } #ifdef CONFIG_HUGETLB_PAGE /* * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. */ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); atomic_inc(&folio->_entire_mapcount); if (flags & RMAP_EXCLUSIVE) SetPageAnonExclusive(&folio->page); VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && PageAnonExclusive(&folio->page), folio); } void hugetlb_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); BUG_ON(address < vma->vm_start || address >= vma->vm_end); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_clear_hugetlb_restore_reserve(folio); __folio_set_anon(folio, vma, address, true); SetPageAnonExclusive(&folio->page); } #endif /* CONFIG_HUGETLB_PAGE */ |
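/*
 * Illustrative aside, not part of mm/rmap.c: every user in this file drives
 * rmap_walk() through a struct rmap_walk_control whose ->rmap_one callback is
 * invoked once per VMA that still maps the folio (see folio_referenced() and
 * try_to_unmap() above). The sketch below shows a minimal walker built the
 * same way; it assumes only the control fields already exercised in this
 * file, and the names count_mappings_one() and count_folio_mappings() are
 * hypothetical, added purely for illustration.
 */
static bool count_mappings_one(struct folio *folio, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	int *nr_vmas = arg;

	/* Count this VMA and keep walking; returning false would stop early. */
	(*nr_vmas)++;
	return true;
}

static int count_folio_mappings(struct folio *folio)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_mappings_one,
		.arg = &nr_vmas,
		/* Same anon_vma locking helper that folio_referenced() uses. */
		.anon_lock = folio_lock_anon_vma_read,
	};

	/* Walkers expect a stable mapping; hold the folio lock across the walk. */
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	rmap_walk(folio, &rwc);
	return nr_vmas;
}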
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/indirect_call_wrapper.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allow compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
*/ #define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \ struct tcp6_sock, tcp)->inet6) static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { const struct rt6_info *rt = (const struct rt6_info *)dst; rcu_assign_pointer(sk->sk_rx_dst, dst); sk->sk_rx_dst_ifindex = skb->skb_iif; sk->sk_rx_dst_cookie = rt6_get_cookie(rt); } } static u32 tcp_v6_init_seq(const struct sk_buff *skb) { return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb) { return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32); } static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { /* This check is replicated from tcp_v6_connect() and intended to * prevent BPF program called below from accessing bytes that are out * of the bound specified by user in addr_len. */ if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; sock_owned_by_me(sk); return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, &addr_len); } static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_connection_sock *icsk = inet_csk(sk); struct in6_addr *saddr = NULL, *final_p, final; struct inet_timewait_death_row *tcp_death_row; struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct ipv6_txoptions *opt; struct dst_entry *dst; struct flowi6 fl6; int addr_type; int err; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; memset(&fl6, 0, sizeof(fl6)); if (inet6_test_bit(SNDFLOW, sk)) { fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK; IP6_ECN_flow_init(fl6.flowlabel); if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { struct ip6_flowlabel *flowlabel; flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (IS_ERR(flowlabel)) return -EINVAL; fl6_sock_release(flowlabel); } } /* * connect() to INADDR_ANY means loopback (BSD'ism). */ if (ipv6_addr_any(&usin->sin6_addr)) { if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), &usin->sin6_addr); else usin->sin6_addr = in6addr_loopback; } addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type & IPV6_ADDR_MULTICAST) return -ENETUNREACH; if (addr_type&IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { /* If interface is set while binding, indices * must coincide. 
*/ if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (!sk->sk_bound_dev_if) return -EINVAL; } if (tp->rx_opt.ts_recent_stamp && !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) { tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; WRITE_ONCE(tp->write_seq, 0); } sk->sk_v6_daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* * TCP over IPv4 */ if (addr_type & IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; if (ipv6_only_sock(sk)) return -ENETUNREACH; sin.sin_family = AF_INET; sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, true); sk->sk_backlog_rcv = tcp_v4_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; /* Paired with READ_ONCE() in tcp_(get|set)sockopt() */ WRITE_ONCE(icsk->icsk_af_ops, &ipv6_specific); if (sk_is_mptcp(sk)) mptcpv6_handle_mapped(sk, false); sk->sk_backlog_rcv = tcp_v6_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tp->af_specific = &tcp_sock_ipv6_specific; #endif goto failure; } np->saddr = sk->sk_v6_rcv_saddr; return err; } if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) saddr = &sk->sk_v6_rcv_saddr; fl6.flowi6_proto = IPPROTO_TCP; fl6.daddr = sk->sk_v6_daddr; fl6.saddr = saddr ? *saddr : np->saddr; fl6.flowlabel = ip6_make_flowinfo(np->tclass, np->flow_label); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; fl6.flowi6_uid = sk->sk_uid; opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); final_p = fl6_update_dst(&fl6, opt, &final); security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); dst = ip6_dst_lookup_flow(net, sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; } tp->tcp_usec_ts = dst_tcp_usec_ts(dst); tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (!saddr) { saddr = &fl6.saddr; err = inet_bhash2_update_saddr(sk, saddr, AF_INET6); if (err) goto failure; } /* set the source address */ np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(sk, dst, NULL, NULL); icsk->icsk_ext_hdr_len = 0; if (opt) icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); inet->inet_dport = usin->sin6_port; tcp_set_state(sk, TCP_SYN_SENT); err = inet6_hash_connect(tcp_death_row, sk); if (err) goto late_failure; sk_set_txhash(sk); if (likely(!tp->repair)) { if (!tp->write_seq) WRITE_ONCE(tp->write_seq, secure_tcpv6_seq(np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32, inet->inet_sport, inet->inet_dport)); tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32); } if (tcp_fastopen_defer_connect(sk, &err)) return err; if (err) goto late_failure; err = tcp_connect(sk); if (err) goto late_failure; return 0; late_failure: tcp_set_state(sk, TCP_CLOSE); inet_bhash2_reset_saddr(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; return err; } static void tcp_v6_mtu_reduced(struct sock *sk) { struct dst_entry *dst; u32 mtu; if ((1 << 
sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; mtu = READ_ONCE(tcp_sk(sk)->mtu_info); /* Drop requests trying to increase our current mss. * Check done in __ip6_rt_update_pmtu() is too late. */ if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache) return; dst = inet6_csk_update_pmtu(sk, mtu); if (!dst) return; if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { tcp_sync_mss(sk, dst_mtu(dst)); tcp_simple_retransmit(sk); } } static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); struct net *net = dev_net(skb->dev); struct request_sock *fastopen; struct ipv6_pinfo *np; struct tcp_sock *tp; __u32 seq, snd_una; struct sock *sk; bool fatal; int err; sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &hdr->daddr, th->dest, &hdr->saddr, ntohs(th->source), skb->dev->ifindex, inet6_sdif(skb)); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return -ENOENT; } if (sk->sk_state == TCP_TIME_WAIT) { /* To increase the counter of ignored icmps for TCP-AO */ tcp_ao_ignore_icmp(sk, AF_INET6, type, code); inet_twsk_put(inet_twsk(sk)); return 0; } seq = ntohl(th->seq); fatal = icmpv6_err_convert(type, code, &err); if (sk->sk_state == TCP_NEW_SYN_RECV) { tcp_req_err(sk, seq, fatal); return 0; } if (tcp_ao_ignore_icmp(sk, AF_INET6, type, code)) { sock_put(sk); return 0; } bh_lock_sock(sk); if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) goto out; if (static_branch_unlikely(&ip6_min_hopcount)) { /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } } tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = rcu_dereference(tp->fastopen_rsk); snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } np = tcp_inet6_sk(sk); if (type == NDISC_REDIRECT) { if (!sock_owned_by_user(sk)) { struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); if (dst) dst->ops->redirect(dst, sk, skb); } goto out; } if (type == ICMPV6_PKT_TOOBIG) { u32 mtu = ntohl(info); /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; if (!ip6_sk_accept_pmtu(sk)) goto out; if (mtu < IPV6_MIN_MTU) goto out; WRITE_ONCE(tp->mtu_info, mtu); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags)) sock_hold(sk); goto out; } /* Might be for an request_sock */ switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * already accepted it is treated as a connected one below. 
*/ if (fastopen && !fastopen->sk) break; ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); if (!sock_owned_by_user(sk)) { WRITE_ONCE(sk->sk_err, err); sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ tcp_done(sk); } else { WRITE_ONCE(sk->sk_err_soft, err); } goto out; case TCP_LISTEN: break; default: /* check if this ICMP message allows revert of backoff. * (see RFC 6069) */ if (!fastopen && type == ICMPV6_DEST_UNREACH && code == ICMPV6_NOROUTE) tcp_ld_RTO_revert(sk, seq); } if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) { WRITE_ONCE(sk->sk_err, err); sk_error_report(sk); } else { WRITE_ONCE(sk->sk_err_soft, err); } out: bh_unlock_sock(sk); sock_put(sk); return 0; } static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type, struct sk_buff *syn_skb) { struct inet_request_sock *ireq = inet_rsk(req); const struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct ipv6_txoptions *opt; struct flowi6 *fl6 = &fl->u.ip6; struct sk_buff *skb; int err = -ENOMEM; u8 tclass; /* First, grab a route. */ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, IPPROTO_TCP)) == NULL) goto done; skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb); if (skb) { __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, &ireq->ir_v6_rmt_addr); fl6->daddr = ireq->ir_v6_rmt_addr; if (inet6_test_bit(REPFLOW, sk) && ireq->pktopts) fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ? (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) | (np->tclass & INET_ECN_MASK) : np->tclass; if (!INET_ECN_is_capable(tclass) && tcp_bpf_ca_needs_ecn((struct sock *)req)) tclass |= INET_ECN_ECT_0; rcu_read_lock(); opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); err = ip6_xmit(sk, skb, fl6, skb->mark ? : READ_ONCE(sk->sk_mark), opt, tclass, READ_ONCE(sk->sk_priority)); rcu_read_unlock(); err = net_xmit_eval(err); } done: return err; } static void tcp_v6_reqsk_destructor(struct request_sock *req) { kfree(inet_rsk(req)->ipv6_opt); consume_skb(inet_rsk(req)->pktopts); } #ifdef CONFIG_TCP_MD5SIG static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, const struct in6_addr *addr, int l3index) { return tcp_md5_do_lookup(sk, l3index, (union tcp_md5_addr *)addr, AF_INET6); } static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, const struct sock *addr_sk) { int l3index; l3index = l3mdev_master_ifindex_by_index(sock_net(sk), addr_sk->sk_bound_dev_if); return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr, l3index); } static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; union tcp_ao_addr *addr; int l3index = 0; u8 prefixlen; bool l3flag; u8 flags; if (optlen < sizeof(cmd)) return -EINVAL; if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin6->sin6_family != AF_INET6) return -EINVAL; flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; l3flag = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX; if (optname == TCP_MD5SIG_EXT && cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) { prefixlen = cmd.tcpm_prefixlen; if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) && prefixlen > 32)) return -EINVAL; } else { prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 
32 : 128; } if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex && cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex); if (dev && netif_is_l3_master(dev)) l3index = dev->ifindex; rcu_read_unlock(); /* ok to reference set/not set outside of rcu; * right now device MUST be an L3 master */ if (!dev || !l3index) return -EINVAL; } if (!cmd.tcpm_keylen) { if (ipv6_addr_v4mapped(&sin6->sin6_addr)) return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], AF_INET, prefixlen, l3index, flags); return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, AF_INET6, prefixlen, l3index, flags); } if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; if (ipv6_addr_v4mapped(&sin6->sin6_addr)) { addr = (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3]; /* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd() */ if (tcp_ao_required(sk, addr, AF_INET, l3flag ? l3index : -1, false)) return -EKEYREJECTED; return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags, cmd.tcpm_key, cmd.tcpm_keylen); } addr = (union tcp_md5_addr *)&sin6->sin6_addr; /* Don't allow keys for peers that have a matching TCP-AO key. * See the comment in tcp_ao_add_cmd() */ if (tcp_ao_required(sk, addr, AF_INET6, l3flag ? l3index : -1, false)) return -EKEYREJECTED; return tcp_md5_do_add(sk, addr, AF_INET6, prefixlen, l3index, flags, cmd.tcpm_key, cmd.tcpm_keylen); } static int tcp_v6_md5_hash_headers(struct tcp_sigpool *hp, const struct in6_addr *daddr, const struct in6_addr *saddr, const struct tcphdr *th, int nbytes) { struct tcp6_pseudohdr *bp; struct scatterlist sg; struct tcphdr *_th; bp = hp->scratch; /* 1. TCP pseudo-header (RFC2460) */ bp->saddr = *saddr; bp->daddr = *daddr; bp->protocol = cpu_to_be32(IPPROTO_TCP); bp->len = cpu_to_be32(nbytes); _th = (struct tcphdr *)(bp + 1); memcpy(_th, th, sizeof(*th)); _th->check = 0; sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); ahash_request_set_crypt(hp->req, &sg, NULL, sizeof(*bp) + sizeof(*th)); return crypto_ahash_update(hp->req); } static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, const struct in6_addr *daddr, struct in6_addr *saddr, const struct tcphdr *th) { struct tcp_sigpool hp; if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) goto clear_hash_nostart; if (crypto_ahash_init(hp.req)) goto clear_hash; if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(&hp, key)) goto clear_hash; ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); if (crypto_ahash_final(hp.req)) goto clear_hash; tcp_sigpool_end(&hp); return 0; clear_hash: tcp_sigpool_end(&hp); clear_hash_nostart: memset(md5_hash, 0, 16); return 1; } static int tcp_v6_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); const struct in6_addr *saddr, *daddr; struct tcp_sigpool hp; if (sk) { /* valid for establish/request sockets */ saddr = &sk->sk_v6_rcv_saddr; daddr = &sk->sk_v6_daddr; } else { const struct ipv6hdr *ip6h = ipv6_hdr(skb); saddr = &ip6h->saddr; daddr = &ip6h->daddr; } if (tcp_sigpool_start(tcp_md5_sigpool_id, &hp)) goto clear_hash_nostart; if (crypto_ahash_init(hp.req)) goto clear_hash; if (tcp_v6_md5_hash_headers(&hp, daddr, saddr, th, skb->len)) goto clear_hash; if (tcp_sigpool_hash_skb_data(&hp, skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(&hp, 
key)) goto clear_hash; ahash_request_set_crypt(hp.req, NULL, md5_hash, 0); if (crypto_ahash_final(hp.req)) goto clear_hash; tcp_sigpool_end(&hp); return 0; clear_hash: tcp_sigpool_end(&hp); clear_hash_nostart: memset(md5_hash, 0, 16); return 1; } #endif static void tcp_v6_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb) { bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); struct inet_request_sock *ireq = inet_rsk(req); const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; /* So that link locals have meaning */ if ((!sk_listener->sk_bound_dev_if || l3_slave) && ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) ireq->ir_iif = tcp_v6_iif(skb); if (!TCP_SKB_CB(skb)->tcp_tw_isn && (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim || inet6_test_bit(REPFLOW, sk_listener))) { refcount_inc(&skb->users); ireq->pktopts = skb; } } static struct dst_entry *tcp_v6_route_req(const struct sock *sk, struct sk_buff *skb, struct flowi *fl, struct request_sock *req) { tcp_v6_init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) return NULL; return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); } struct request_sock_ops tcp6_request_sock_ops __read_mostly = { .family = AF_INET6, .obj_size = sizeof(struct tcp6_request_sock), .rtx_syn_ack = tcp_rtx_synack, .send_ack = tcp_v6_reqsk_send_ack, .destructor = tcp_v6_reqsk_destructor, .send_reset = tcp_v6_send_reset, .syn_ack_timeout = tcp_syn_ack_timeout, }; const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = { .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr), #ifdef CONFIG_TCP_MD5SIG .req_md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup_rsk, .ao_calc_key = tcp_v6_ao_calc_key_rsk, .ao_synack_hash = tcp_v6_ao_synack_hash, #endif #ifdef CONFIG_SYN_COOKIES .cookie_init_seq = cookie_v6_init_sequence, #endif .route_req = tcp_v6_route_req, .init_seq = tcp_v6_init_seq, .init_ts_off = tcp_v6_init_ts_off, .send_synack = tcp_v6_send_synack, }; static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, int rst, u8 tclass, __be32 label, u32 priority, u32 txhash, struct tcp_key *key) { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; struct sk_buff *buff; struct flowi6 fl6; struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); struct sock *ctl_sk = net->ipv6.tcp_sk; unsigned int tot_len = sizeof(struct tcphdr); __be32 mrst = 0, *topt; struct dst_entry *dst; __u32 mark = 0; if (tsecr) tot_len += TCPOLEN_TSTAMP_ALIGNED; if (tcp_key_is_md5(key)) tot_len += TCPOLEN_MD5SIG_ALIGNED; if (tcp_key_is_ao(key)) tot_len += tcp_ao_len_aligned(key->ao_key); #ifdef CONFIG_MPTCP if (rst && !tcp_key_is_md5(key)) { mrst = mptcp_reset_option(skb); if (mrst) tot_len += sizeof(__be32); } #endif buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); if (!buff) return; skb_reserve(buff, MAX_TCP_HEADER); t1 = skb_push(buff, tot_len); skb_reset_transport_header(buff); /* Swap the send and the receive. 
*/ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; t1->doff = tot_len / 4; t1->seq = htonl(seq); t1->ack_seq = htonl(ack); t1->ack = !rst || !th->ack; t1->rst = rst; t1->window = htons(win); topt = (__be32 *)(t1 + 1); if (tsecr) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); *topt++ = htonl(tsval); *topt++ = htonl(tsecr); } if (mrst) *topt++ = mrst; #ifdef CONFIG_TCP_MD5SIG if (tcp_key_is_md5(key)) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); } #endif #ifdef CONFIG_TCP_AO if (tcp_key_is_ao(key)) { *topt++ = htonl((TCPOPT_AO << 24) | (tcp_ao_len(key->ao_key) << 16) | (key->ao_key->sndid << 8) | (key->rcv_next)); tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key, key->traffic_key, (union tcp_ao_addr *)&ipv6_hdr(skb)->saddr, (union tcp_ao_addr *)&ipv6_hdr(skb)->daddr, t1, key->sne); } #endif memset(&fl6, 0, sizeof(fl6)); fl6.daddr = ipv6_hdr(skb)->saddr; fl6.saddr = ipv6_hdr(skb)->daddr; fl6.flowlabel = label; buff->ip_summed = CHECKSUM_PARTIAL; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); else { if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) oif = skb->skb_iif; fl6.flowi6_oif = oif; } if (sk) { if (sk->sk_state == TCP_TIME_WAIT) mark = inet_twsk(sk)->tw_mark; else mark = READ_ONCE(sk->sk_mark); skb_set_delivery_time(buff, tcp_transmit_time(sk), true); } if (txhash) { /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */ skb_set_hash(buff, txhash, PKT_HASH_TYPE_L4); } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark; fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6)); /* Pass a socket to ip6_dst_lookup either it is for RST * Underlying function will use this to retrieve the network * namespace */ if (sk && sk->sk_state != TCP_TIME_WAIT) dst = ip6_dst_lookup_flow(net, sk, &fl6, NULL); /*sk's xfrm_policy can be referred*/ else dst = ip6_dst_lookup_flow(net, ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass & ~INET_ECN_MASK, priority); TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } kfree_skb(buff); } static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); const __u8 *md5_hash_location = NULL; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) bool allocated_traffic_key = false; #endif const struct tcp_ao_hdr *aoh; struct tcp_key key = {}; u32 seq = 0, ack_seq = 0; __be32 label = 0; u32 priority = 0; struct net *net; u32 txhash = 0; int oif = 0; #ifdef CONFIG_TCP_MD5SIG unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && !ipv6_unicast_destination(skb)) return; net = sk ? 
sock_net(sk) : dev_net(skb_dst(skb)->dev); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(th, &md5_hash_location, &aoh)) return; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) rcu_read_lock(); #endif #ifdef CONFIG_TCP_MD5SIG if (sk && sk_fullsock(sk)) { int l3index; /* sdif set, means packet ingressed via a device * in an L3 domain and inet_iif is set to it. */ l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index); if (key.md5_key) key.type = TCP_KEY_MD5; } else if (md5_hash_location) { int dif = tcp_v6_iif_l3_slave(skb); int sdif = tcp_v6_sdif(skb); int l3index; /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, ntohs(th->source), dif, sdif); if (!sk1) goto out; /* sdif set, means packet ingressed via a device * in an L3 domain and dif is set to it. */ l3index = tcp_v6_sdif(skb) ? dif : 0; key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index); if (!key.md5_key) goto out; key.type = TCP_KEY_MD5; genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb); if (genhash || memcmp(md5_hash_location, newhash, 16) != 0) goto out; } #endif if (th->ack) seq = ntohl(th->ack_seq); else ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2); #ifdef CONFIG_TCP_AO if (aoh) { int l3index; l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq, &key.ao_key, &key.traffic_key, &allocated_traffic_key, &key.rcv_next, &key.sne)) goto out; key.type = TCP_KEY_AO; } #endif if (sk) { oif = sk->sk_bound_dev_if; if (sk_fullsock(sk)) { trace_tcp_send_reset(sk, skb); if (inet6_test_bit(REPFLOW, sk)) label = ip6_flowlabel(ipv6h); priority = READ_ONCE(sk->sk_priority); txhash = sk->sk_txhash; } if (sk->sk_state == TCP_TIME_WAIT) { label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel); priority = inet_twsk(sk)->tw_priority; txhash = inet_twsk(sk)->tw_txhash; } } else { if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET) label = ip6_flowlabel(ipv6h); } tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1, ipv6_get_dsfield(ipv6h), label, priority, txhash, &key); #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) out: if (allocated_traffic_key) kfree(key.traffic_key); rcu_read_unlock(); #endif } static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_key *key, u8 tclass, __be32 label, u32 priority, u32 txhash) { tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0, tclass, label, priority, txhash, key); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); struct tcp_key key = {}; #ifdef CONFIG_TCP_AO struct tcp_ao_info *ao_info; if (static_branch_unlikely(&tcp_ao_needed.key)) { /* FIXME: the segment to-be-acked is not verified yet */ ao_info = rcu_dereference(tcptw->ao_info); if (ao_info) { const struct tcp_ao_hdr *aoh; /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) goto out; if (aoh) key.ao_key = 
tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1); } } if (key.ao_key) { struct tcp_ao_key *rnext_key; key.traffic_key = snd_other_key(key.ao_key); /* rcv_next switches to our rcv_next */ rnext_key = READ_ONCE(ao_info->rnext_key); key.rcv_next = rnext_key->rcvid; key.sne = READ_ONCE(ao_info->snd_sne); key.type = TCP_KEY_AO; #else if (0) { #endif #ifdef CONFIG_TCP_MD5SIG } else if (static_branch_unlikely(&tcp_md5_needed.key)) { key.md5_key = tcp_twsk_md5_key(tcptw); if (key.md5_key) key.type = TCP_KEY_MD5; #endif } tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_tw_tsval(tcptw), tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority, tw->tw_txhash); #ifdef CONFIG_TCP_AO out: #endif inet_twsk_put(tw); } static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { struct tcp_key key = {}; #ifdef CONFIG_TCP_AO if (static_branch_unlikely(&tcp_ao_needed.key) && tcp_rsk_used_ao(req)) { const struct in6_addr *addr = &ipv6_hdr(skb)->saddr; const struct tcp_ao_hdr *aoh; int l3index; l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) return; if (!aoh) return; key.ao_key = tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, AF_INET6, aoh->rnext_keyid, -1); if (unlikely(!key.ao_key)) { /* Send ACK with any matching MKT for the peer */ key.ao_key = tcp_ao_do_lookup(sk, l3index, (union tcp_ao_addr *)addr, AF_INET6, -1, -1); /* Matching key disappeared (user removed the key?) * let the handshake timeout. */ if (!key.ao_key) { net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n", addr, ntohs(tcp_hdr(skb)->source), &ipv6_hdr(skb)->daddr, ntohs(tcp_hdr(skb)->dest)); return; } } key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC); if (!key.traffic_key) return; key.type = TCP_KEY_AO; key.rcv_next = aoh->keyid; tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req); #else if (0) { #endif #ifdef CONFIG_TCP_MD5SIG } else if (static_branch_unlikely(&tcp_md5_needed.key)) { int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0; key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index); if (key.md5_key) key.type = TCP_KEY_MD5; #endif } /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ /* RFC 7323 2.3 * The window field (SEG.WND) of every outgoing segment, with the * exception of <SYN> segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_rsk_tsval(tcp_rsk(req)), READ_ONCE(req->ts_recent), sk->sk_bound_dev_if, &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0, READ_ONCE(sk->sk_priority), READ_ONCE(tcp_rsk(req)->txhash)); if (tcp_key_is_ao(&key)) kfree(key.traffic_key); } static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; } u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, struct tcphdr *th, u32 *cookie) { u16 mss = 0; #ifdef CONFIG_SYN_COOKIES mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, th); if (mss) { *cookie = __cookie_v6_init_sequence(iph, th, &mss); tcp_synq_overflow(sk); } #endif return mss; } static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) goto drop; if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) { __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS); return 0; } return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } static void tcp_v6_restore_cb(struct sk_buff *skb) { /* We need to move header back to the beginning if xfrm6_policy_check() * and tcp_v6_fill_cb() are going to be called again. * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. */ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, sizeof(struct inet6_skb_parm)); } static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct ipv6_txoptions *opt; struct inet_sock *newinet; bool found_dup_sk = false; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; int l3index; #endif struct flowi6 fl6; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); if (!newsk) return NULL; inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); newnp = tcp_inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; if (sk_is_mptcp(newsk)) mptcpv6_handle_mapped(newsk, true); newsk->sk_backlog_rcv = tcp_v4_do_rcv; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet_iif(skb); newnp->mcast_hops = ip_hdr(skb)->ttl; newnp->rcv_flowinfo = 0; if (inet6_test_bit(REPFLOW, sk)) newnp->flow_label = 0; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. 
*/ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } ireq = inet_rsk(req); if (sk_acceptq_is_full(sk)) goto out_overflow; if (!dst) { dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP); if (!dst) goto out; } newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(newsk, dst, NULL, NULL); inet6_sk_rx_dst_set(newsk, skb); inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = tcp_inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; newnp->saddr = ireq->ir_v6_loc_addr; newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; newsk->sk_bound_dev_if = ireq->ir_iif; /* Now IPv6 options... First: no IPv4 options. */ newinet->inet_opt = NULL; newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (inet6_test_bit(REPFLOW, sk)) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). */ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)) newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK; /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. 
*/ opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); RCU_INIT_POINTER(newnp->opt, opt); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (opt) inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + opt->opt_flen; tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; #ifdef CONFIG_TCP_MD5SIG l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif); if (!tcp_rsk_used_ao(req)) { /* Copy over the MD5 key from the original socket */ key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index); if (key) { const union tcp_md5_addr *addr; addr = (union tcp_md5_addr *)&newsk->sk_v6_daddr; if (tcp_md5_key_copy(newsk, addr, AF_INET6, 128, l3index, key)) { inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto out; } } } #endif #ifdef CONFIG_TCP_AO /* Copy over tcp_ao_info if any */ if (tcp_ao_copy_all_matching(sk, newsk, req, skb, AF_INET6)) goto out; /* OOM */ #endif if (__inet_inherit_port(sk, newsk) < 0) { inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto out; } *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), &found_dup_sk); if (*own_req) { tcp_move_syn(newtp, req); /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk); consume_skb(ireq->pktopts); ireq->pktopts = NULL; if (newnp->pktoptions) tcp_v6_restore_cb(newnp->pktoptions); } } else { if (!req_unhash && found_dup_sk) { /* This code path should only be executed in the * syncookie case only */ bh_unlock_sock(newsk); sock_put(newsk); newsk = NULL; } } return newsk; out_overflow: __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: tcp_listendrop(sk); return NULL; } INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, u32)); /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct sk_buff *opt_skb = NULL; enum skb_drop_reason reason; struct tcp_sock *tp; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. 
--ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone_and_charge_r(skb, sk); reason = SKB_DROP_REASON_NOT_SPECIFIED; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst; dst = rcu_dereference_protected(sk->sk_rx_dst, lockdep_sock_is_held(sk)); sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (sk->sk_rx_dst_ifindex != skb->skb_iif || INDIRECT_CALL_1(dst->ops->check, ip6_dst_check, dst, sk->sk_rx_dst_cookie) == NULL) { RCU_INIT_POINTER(sk->sk_rx_dst, NULL); dst_release(dst); } } tcp_rcv_established(sk, skb); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb_reason(skb, reason); return 0; csum_err: reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) WRITE_ONCE(np->mcast_oif, tcp_v6_iif(opt_skb)); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit); if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (inet6_test_bit(REPFLOW, sk)) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } consume_skb(opt_skb); return 0; } static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, const struct tcphdr *th) { /* This is tricky: we move IP6CB at its correct location into * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because * _decode_session6() uses IP6CB(). * barrier() makes sure compiler won't play aliasing games. 
*/ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), sizeof(struct inet6_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; TCP_SKB_CB(skb)->has_rxtstamp = skb->tstamp || skb_hwtstamps(skb)->hwtstamp; } INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) { enum skb_drop_reason drop_reason; int sdif = inet6_sdif(skb); int dif = inet6_iif(skb); const struct tcphdr *th; const struct ipv6hdr *hdr; bool refcounted; struct sock *sk; int ret; struct net *net = dev_net(skb->dev); drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* * Count it even if it's bad. */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) { drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL; goto bad_packet; } if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); lookup: sk = __inet6_lookup_skb(net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, inet6_iif(skb), sdif, &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); bool req_stolen = false; struct sock *nsk; sk = req->rsk_listener; if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) drop_reason = SKB_DROP_REASON_XFRM_POLICY; else drop_reason = tcp_inbound_hash(sk, req, skb, &hdr->saddr, &hdr->daddr, AF_INET6, dif, sdif); if (drop_reason) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (tcp_checksum_complete(skb)) { reqsk_put(req); goto csum_error; } if (unlikely(sk->sk_state != TCP_LISTEN)) { nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb); if (!nsk) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } sk = nsk; /* reuseport_migrate_sock() has already held one sk_refcnt * before returning. */ } else { sock_hold(sk); } refcounted = true; nsk = NULL; if (!tcp_filter(sk, skb)) { th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); tcp_v6_fill_cb(skb, hdr, th); nsk = tcp_check_req(sk, skb, req, false, &req_stolen); } else { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; } if (!nsk) { reqsk_put(req); if (req_stolen) { /* Another cpu got exclusive access to req * and created a full blown socket. * Try to feed this packet to this socket * instead of discarding it. 
*/ tcp_v6_restore_cb(skb); sock_put(sk); goto lookup; } goto discard_and_relse; } nf_reset_ct(skb); if (nsk == sk) { reqsk_put(req); tcp_v6_restore_cb(skb); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v6_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (static_branch_unlikely(&ip6_min_hopcount)) { /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */ if (unlikely(hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount))) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); drop_reason = SKB_DROP_REASON_TCP_MINTTL; goto discard_and_relse; } } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; goto discard_and_relse; } drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr, AF_INET6, dif, sdif); if (drop_reason) goto discard_and_relse; nf_reset_ct(skb); if (tcp_filter(sk, skb)) { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto discard_and_relse; } th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); tcp_v6_fill_cb(skb, hdr, th); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v6_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { ret = tcp_v6_do_rcv(sk, skb); } else { if (tcp_add_backlog(sk, skb, &drop_reason)) goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret ? -1 : 0; no_tcp_socket: drop_reason = SKB_DROP_REASON_NO_SOCKET; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { csum_error: drop_reason = SKB_DROP_REASON_TCP_CSUM; trace_tcp_bad_csum(skb); __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb); } discard_it: SKB_DR_OR(drop_reason, NOT_SPECIFIED); kfree_skb_reason(skb, drop_reason); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { drop_reason = SKB_DROP_REASON_XFRM_POLICY; inet_twsk_put(inet_twsk(sk)); goto discard_it; } tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2; sk2 = inet6_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), tcp_v6_iif_l3_slave(skb), sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule_put(tw); sk = sk2; tcp_v6_restore_cb(skb); refcounted = false; goto process; } } /* to ACK */ fallthrough; case TCP_TW_ACK: tcp_v6_timewait_ack(sk, skb); break; case TCP_TW_RST: tcp_v6_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS: ; } goto discard_it; } void tcp_v6_early_demux(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct ipv6hdr *hdr; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; hdr = ipv6_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; /* Note : We use inet6_iif() here, not tcp_v6_iif() */ sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &hdr->saddr, th->source, 
&hdr->daddr, ntohs(th->dest), inet6_iif(skb), inet6_sdif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst); if (dst) dst = dst_check(dst, sk->sk_rx_dst_cookie); if (dst && sk->sk_rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } } static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), .twsk_unique = tcp_twsk_unique, .twsk_destructor = tcp_twsk_destructor, }; INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) { __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr); } const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .mtu_reduced = tcp_v6_mtu_reduced, }; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup, .calc_ao_hash = tcp_v6_ao_hash_skb, .ao_parse = tcp_v6_parse_ao, .ao_calc_key_sk = tcp_v6_ao_calc_key_sk, #endif }; #endif /* * TCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .mtu_reduced = tcp_v4_mtu_reduced, }; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { #ifdef CONFIG_TCP_MD5SIG .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, #endif #ifdef CONFIG_TCP_AO .ao_lookup = tcp_v6_ao_lookup, .calc_ao_hash = tcp_v4_ao_hash_skb, .ao_parse = tcp_v6_parse_ao, .ao_calc_key_sk = tcp_v4_ao_calc_key_sk, #endif }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; #endif return 0; } #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. 
*/ static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; const struct inet_sock *inet = inet_sk(sp); const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; int rx_queue; int state; dest = &sp->sk_v6_daddr; src = &sp->sk_v6_rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = inet_sk_state_load(sp); if (state == TCP_LISTEN) rx_queue = READ_ONCE(sp->sk_ack_backlog); else /* Because we don't lock the socket, * we might find a transient negative value. */ rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq), 0); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, state, READ_ONCE(tp->write_seq) - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), icsk->icsk_probes_out, sock_i_ino(sp), refcount_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp), tcp_snd_cwnd(tp), state == TCP_LISTEN ? fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh) ); } static void get_timewait6_sock(struct seq_file *seq, struct inet_timewait_sock *tw, int i) { long delta = tw->tw_timer.expires - jiffies; const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &tw->tw_v6_daddr; src = &tw->tw_v6_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, tw->tw_substate, 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, refcount_read(&tw->tw_refcnt), tw); } static int tcp6_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode\n"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait6_sock(seq, v, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) get_openreq6(seq, v, st->num); else get_tcp6_sock(seq, v, st->num); out: return 0; } static const struct seq_operations tcp6_seq_ops = { .show = tcp6_seq_show, .start = tcp_seq_start, .next = tcp_seq_next, .stop = tcp_seq_stop, }; static struct tcp_seq_afinfo tcp6_seq_afinfo = { .family = AF_INET6, }; int __net_init tcp6_proc_init(struct net *net) { if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops, sizeof(struct tcp_iter_state), &tcp6_seq_afinfo)) return -ENOMEM; return 0; } void tcp6_proc_exit(struct net *net) { remove_proc_entry("tcp6", net->proc_net); } #endif struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, .close = tcp_close, .pre_connect = tcp_v6_pre_connect, .connect = tcp_v6_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt, .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .splice_eof = tcp_splice_eof, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, .hash = inet6_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .put_port = inet_put_port, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = tcp_bpf_update_proto, #endif .enter_memory_pressure = tcp_enter_memory_pressure, .leave_memory_pressure = tcp_leave_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, .memory_pressure = &tcp_memory_pressure, .orphan_count = &tcp_orphan_count, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), .ipv6_pinfo_offset = offsetof(struct tcp6_sock, inet6), .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = NULL, .no_autobind = true, .diag_destroy = tcp_abort, }; EXPORT_SYMBOL_GPL(tcpv6_prot); static const struct inet6_protocol tcpv6_protocol = { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static struct inet_protosw 
tcpv6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_TCP, .prot = &tcpv6_prot, .ops = &inet6_stream_ops, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }; static int __net_init tcpv6_net_init(struct net *net) { return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, SOCK_RAW, IPPROTO_TCP, net); } static void __net_exit tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); } static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) { tcp_twsk_purge(net_exit_list, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { .init = tcpv6_net_init, .exit = tcpv6_net_exit, .exit_batch = tcpv6_net_exit_batch, }; int __init tcpv6_init(void) { int ret; ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP); if (ret) goto out; /* register inet6 protocol */ ret = inet6_register_protosw(&tcpv6_protosw); if (ret) goto out_tcpv6_protocol; ret = register_pernet_subsys(&tcpv6_net_ops); if (ret) goto out_tcpv6_protosw; ret = mptcpv6_init(); if (ret) goto out_tcpv6_pernet_subsys; out: return ret; out_tcpv6_pernet_subsys: unregister_pernet_subsys(&tcpv6_net_ops); out_tcpv6_protosw: inet6_unregister_protosw(&tcpv6_protosw); out_tcpv6_protocol: inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); goto out; } void tcpv6_exit(void) { unregister_pernet_subsys(&tcpv6_net_ops); inet6_unregister_protosw(&tcpv6_protosw); inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); } |
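/*
 * Example (editorial sketch, not part of the kernel sources): a minimal
 * userspace program that decodes the rows written by get_tcp6_sock() above.
 * Each address is four s6_addr32[] words printed with %08X in the kernel's
 * native byte order, followed by the port as %04X and the state as %02X, so
 * the words can be copied straight back into a struct in6_addr when the
 * reader runs on the same machine (same endianness) as the kernel that
 * produced /proc/net/tcp6.  The names decode_tcp6_row() and main() are local
 * to this sketch and assume an ordinary glibc build environment.
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

static void decode_tcp6_row(const char *row)
{
	struct in6_addr laddr, raddr;
	unsigned int l[4], r[4], lport, rport, state;
	char lbuf[INET6_ADDRSTRLEN], rbuf[INET6_ADDRSTRLEN];
	int i;

	/* Matches "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X %02X ..." */
	if (sscanf(row, "%*d: %8x%8x%8x%8x:%4x %8x%8x%8x%8x:%4x %2x",
		   &l[0], &l[1], &l[2], &l[3], &lport,
		   &r[0], &r[1], &r[2], &r[3], &rport, &state) != 11)
		return;

	for (i = 0; i < 4; i++) {
		/* Store each 32-bit word back in the kernel's (native) byte order. */
		memcpy(&laddr.s6_addr[i * 4], &l[i], 4);
		memcpy(&raddr.s6_addr[i * 4], &r[i], 4);
	}

	printf("%s:%u -> %s:%u st=%02x\n",
	       inet_ntop(AF_INET6, &laddr, lbuf, sizeof(lbuf)), lport,
	       inet_ntop(AF_INET6, &raddr, rbuf, sizeof(rbuf)), rport, state);
}

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp6", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))	/* skip the header row */
		while (fgets(line, sizeof(line), f))
			decode_tcp6_row(line);
	fclose(f);
	return 0;
}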
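/*
 * The header that follows declares the AF_RXRPC tracepoints with
 * TRACE_EVENT().  As a rough, illustrative sketch (not code taken from
 * net/rxrpc itself), rxrpc code emits one of these events by calling the
 * trace_<name>() stub that TRACE_EVENT() generates, passing the TP_PROTO()
 * arguments.  The function below is hypothetical and would only build inside
 * net/rxrpc next to its internal headers, so it is guarded out here.
 */
#if 0	/* illustration only */
static void example_note_recvmsg_entry(struct rxrpc_call *call)
{
	/*
	 * TRACE_EVENT(rxrpc_recvmsg, ...) below generates this stub; the enum
	 * value is rendered as "ENTR" at read time by __print_symbolic()
	 * using the rxrpc_recvmsg_traces string table.
	 */
	trace_rxrpc_recvmsg(call->debug_id, rxrpc_recvmsg_enter, 0);
}
#endif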
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC tracepoints
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rxrpc

#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RXRPC_H

#include <linux/tracepoint.h>
#include <linux/errqueue.h>

/*
 * Declare tracing information enums and their string mappings for display.
*/ #define rxrpc_abort_reasons \ /* AFS errors */ \ EM(afs_abort_general_error, "afs-error") \ EM(afs_abort_interrupted, "afs-intr") \ EM(afs_abort_oom, "afs-oom") \ EM(afs_abort_op_not_supported, "afs-op-notsupp") \ EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \ EM(afs_abort_send_data_error, "afs-send-data") \ EM(afs_abort_unmarshal_error, "afs-unmarshal") \ /* rxperf errors */ \ EM(rxperf_abort_general_error, "rxperf-error") \ EM(rxperf_abort_oom, "rxperf-oom") \ EM(rxperf_abort_op_not_supported, "rxperf-op-notsupp") \ EM(rxperf_abort_unmarshal_error, "rxperf-unmarshal") \ /* RxKAD security errors */ \ EM(rxkad_abort_1_short_check, "rxkad1-short-check") \ EM(rxkad_abort_1_short_data, "rxkad1-short-data") \ EM(rxkad_abort_1_short_encdata, "rxkad1-short-encdata") \ EM(rxkad_abort_1_short_header, "rxkad1-short-hdr") \ EM(rxkad_abort_2_short_check, "rxkad2-short-check") \ EM(rxkad_abort_2_short_data, "rxkad2-short-data") \ EM(rxkad_abort_2_short_header, "rxkad2-short-hdr") \ EM(rxkad_abort_2_short_len, "rxkad2-short-len") \ EM(rxkad_abort_bad_checksum, "rxkad2-bad-cksum") \ EM(rxkad_abort_chall_key_expired, "rxkad-chall-key-exp") \ EM(rxkad_abort_chall_level, "rxkad-chall-level") \ EM(rxkad_abort_chall_no_key, "rxkad-chall-nokey") \ EM(rxkad_abort_chall_short, "rxkad-chall-short") \ EM(rxkad_abort_chall_version, "rxkad-chall-version") \ EM(rxkad_abort_resp_bad_callid, "rxkad-resp-bad-callid") \ EM(rxkad_abort_resp_bad_checksum, "rxkad-resp-bad-cksum") \ EM(rxkad_abort_resp_bad_param, "rxkad-resp-bad-param") \ EM(rxkad_abort_resp_call_ctr, "rxkad-resp-call-ctr") \ EM(rxkad_abort_resp_call_state, "rxkad-resp-call-state") \ EM(rxkad_abort_resp_key_expired, "rxkad-resp-key-exp") \ EM(rxkad_abort_resp_key_rejected, "rxkad-resp-key-rej") \ EM(rxkad_abort_resp_level, "rxkad-resp-level") \ EM(rxkad_abort_resp_nokey, "rxkad-resp-nokey") \ EM(rxkad_abort_resp_ooseq, "rxkad-resp-ooseq") \ EM(rxkad_abort_resp_short, "rxkad-resp-short") \ EM(rxkad_abort_resp_short_tkt, "rxkad-resp-short-tkt") \ EM(rxkad_abort_resp_tkt_aname, "rxkad-resp-tk-aname") \ EM(rxkad_abort_resp_tkt_expired, "rxkad-resp-tk-exp") \ EM(rxkad_abort_resp_tkt_future, "rxkad-resp-tk-future") \ EM(rxkad_abort_resp_tkt_inst, "rxkad-resp-tk-inst") \ EM(rxkad_abort_resp_tkt_len, "rxkad-resp-tk-len") \ EM(rxkad_abort_resp_tkt_realm, "rxkad-resp-tk-realm") \ EM(rxkad_abort_resp_tkt_short, "rxkad-resp-tk-short") \ EM(rxkad_abort_resp_tkt_sinst, "rxkad-resp-tk-sinst") \ EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \ EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \ EM(rxkad_abort_resp_version, "rxkad-resp-version") \ /* rxrpc errors */ \ EM(rxrpc_abort_call_improper_term, "call-improper-term") \ EM(rxrpc_abort_call_reset, "call-reset") \ EM(rxrpc_abort_call_sendmsg, "call-sendmsg") \ EM(rxrpc_abort_call_sock_release, "call-sock-rel") \ EM(rxrpc_abort_call_sock_release_tba, "call-sock-rel-tba") \ EM(rxrpc_abort_call_timeout, "call-timeout") \ EM(rxrpc_abort_no_service_key, "no-serv-key") \ EM(rxrpc_abort_nomem, "nomem") \ EM(rxrpc_abort_service_not_offered, "serv-not-offered") \ EM(rxrpc_abort_shut_down, "shut-down") \ EM(rxrpc_abort_unsupported_security, "unsup-sec") \ EM(rxrpc_badmsg_bad_abort, "bad-abort") \ EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \ EM(rxrpc_badmsg_short_ack, "short-ack") \ EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \ EM(rxrpc_badmsg_short_hdr, "short-hdr") \ EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \ EM(rxrpc_badmsg_zero_call, "zero-call") \ EM(rxrpc_badmsg_zero_seq, "zero-seq") \ 
EM(rxrpc_badmsg_zero_service, "zero-service") \ EM(rxrpc_eproto_ackr_outside_window, "ackr-out-win") \ EM(rxrpc_eproto_ackr_sack_overflow, "ackr-sack-over") \ EM(rxrpc_eproto_ackr_short_sack, "ackr-short-sack") \ EM(rxrpc_eproto_ackr_zero, "ackr-zero") \ EM(rxrpc_eproto_bad_upgrade, "bad-upgrade") \ EM(rxrpc_eproto_data_after_last, "data-after-last") \ EM(rxrpc_eproto_different_last, "diff-last") \ EM(rxrpc_eproto_early_reply, "early-reply") \ EM(rxrpc_eproto_improper_term, "improper-term") \ EM(rxrpc_eproto_no_client_call, "no-cl-call") \ EM(rxrpc_eproto_no_client_conn, "no-cl-conn") \ EM(rxrpc_eproto_no_service_call, "no-sv-call") \ EM(rxrpc_eproto_reupgrade, "re-upgrade") \ EM(rxrpc_eproto_rxnull_challenge, "rxnull-chall") \ EM(rxrpc_eproto_rxnull_response, "rxnull-resp") \ EM(rxrpc_eproto_tx_rot_last, "tx-rot-last") \ EM(rxrpc_eproto_unexpected_ack, "unex-ack") \ EM(rxrpc_eproto_unexpected_ackall, "unex-ackall") \ EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end") \ EM(rxrpc_eproto_unexpected_reply, "unex-reply") \ EM(rxrpc_eproto_wrong_security, "wrong-sec") \ EM(rxrpc_recvmsg_excess_data, "recvmsg-excess") \ EM(rxrpc_recvmsg_short_data, "recvmsg-short") \ E_(rxrpc_sendmsg_late_send, "sendmsg-late") #define rxrpc_call_poke_traces \ EM(rxrpc_call_poke_abort, "Abort") \ EM(rxrpc_call_poke_complete, "Compl") \ EM(rxrpc_call_poke_error, "Error") \ EM(rxrpc_call_poke_idle, "Idle") \ EM(rxrpc_call_poke_start, "Start") \ EM(rxrpc_call_poke_timer, "Timer") \ E_(rxrpc_call_poke_timer_now, "Timer-now") #define rxrpc_skb_traces \ EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \ EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ EM(rxrpc_skb_get_conn_work, "GET conn-work") \ EM(rxrpc_skb_get_last_nack, "GET last-nack") \ EM(rxrpc_skb_get_local_work, "GET locl-work") \ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \ EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \ EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \ EM(rxrpc_skb_new_error_report, "NEW error-rpt") \ EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \ EM(rxrpc_skb_new_unshared, "NEW unshared ") \ EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \ EM(rxrpc_skb_put_conn_work, "PUT conn-work") \ EM(rxrpc_skb_put_error_report, "PUT error-rep") \ EM(rxrpc_skb_put_input, "PUT input ") \ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \ EM(rxrpc_skb_put_last_nack, "PUT last-nack") \ EM(rxrpc_skb_put_purge, "PUT purge ") \ EM(rxrpc_skb_put_rotate, "PUT rotate ") \ EM(rxrpc_skb_put_unknown, "PUT unknown ") \ EM(rxrpc_skb_see_conn_work, "SEE conn-work") \ EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \ EM(rxrpc_skb_see_reject, "SEE reject ") \ EM(rxrpc_skb_see_rotate, "SEE rotate ") \ E_(rxrpc_skb_see_version, "SEE version ") #define rxrpc_local_traces \ EM(rxrpc_local_free, "FREE ") \ EM(rxrpc_local_get_call, "GET call ") \ EM(rxrpc_local_get_client_conn, "GET conn-cln") \ EM(rxrpc_local_get_for_use, "GET for-use ") \ EM(rxrpc_local_get_peer, "GET peer ") \ EM(rxrpc_local_get_prealloc_conn, "GET conn-pre") \ EM(rxrpc_local_new, "NEW ") \ EM(rxrpc_local_put_bind, "PUT bind ") \ EM(rxrpc_local_put_call, "PUT call ") \ EM(rxrpc_local_put_for_use, "PUT for-use ") \ EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \ EM(rxrpc_local_put_peer, "PUT peer ") \ EM(rxrpc_local_put_prealloc_peer, "PUT peer-pre") \ EM(rxrpc_local_put_release_sock, "PUT rel-sock") \ EM(rxrpc_local_stop, "STOP ") \ EM(rxrpc_local_stopped, "STOPPED ") \ 
EM(rxrpc_local_unuse_bind, "UNU bind ") \ EM(rxrpc_local_unuse_conn_work, "UNU conn-wrk") \ EM(rxrpc_local_unuse_peer_keepalive, "UNU peer-kpa") \ EM(rxrpc_local_unuse_release_sock, "UNU rel-sock") \ EM(rxrpc_local_use_conn_work, "USE conn-wrk") \ EM(rxrpc_local_use_lookup, "USE lookup ") \ E_(rxrpc_local_use_peer_keepalive, "USE peer-kpa") #define rxrpc_peer_traces \ EM(rxrpc_peer_free, "FREE ") \ EM(rxrpc_peer_get_accept, "GET accept ") \ EM(rxrpc_peer_get_application, "GET app ") \ EM(rxrpc_peer_get_bundle, "GET bundle ") \ EM(rxrpc_peer_get_call, "GET call ") \ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \ EM(rxrpc_peer_get_input, "GET input ") \ EM(rxrpc_peer_get_input_error, "GET inpt-err") \ EM(rxrpc_peer_get_keepalive, "GET keepaliv") \ EM(rxrpc_peer_get_lookup_client, "GET look-cln") \ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \ EM(rxrpc_peer_new_client, "NEW client ") \ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \ EM(rxrpc_peer_put_application, "PUT app ") \ EM(rxrpc_peer_put_bundle, "PUT bundle ") \ EM(rxrpc_peer_put_call, "PUT call ") \ EM(rxrpc_peer_put_conn, "PUT conn ") \ EM(rxrpc_peer_put_input, "PUT input ") \ EM(rxrpc_peer_put_input_error, "PUT inpt-err") \ E_(rxrpc_peer_put_keepalive, "PUT keepaliv") #define rxrpc_bundle_traces \ EM(rxrpc_bundle_free, "FREE ") \ EM(rxrpc_bundle_get_client_call, "GET clt-call") \ EM(rxrpc_bundle_get_client_conn, "GET clt-conn") \ EM(rxrpc_bundle_get_service_conn, "GET svc-conn") \ EM(rxrpc_bundle_put_call, "PUT call ") \ EM(rxrpc_bundle_put_conn, "PUT conn ") \ EM(rxrpc_bundle_put_discard, "PUT discard ") \ E_(rxrpc_bundle_new, "NEW ") #define rxrpc_conn_traces \ EM(rxrpc_conn_free, "FREE ") \ EM(rxrpc_conn_get_activate_call, "GET act-call") \ EM(rxrpc_conn_get_call_input, "GET inp-call") \ EM(rxrpc_conn_get_conn_input, "GET inp-conn") \ EM(rxrpc_conn_get_idle, "GET idle ") \ EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \ EM(rxrpc_conn_get_poke_timer, "GET poke ") \ EM(rxrpc_conn_get_service_conn, "GET svc-conn") \ EM(rxrpc_conn_new_client, "NEW client ") \ EM(rxrpc_conn_new_service, "NEW service ") \ EM(rxrpc_conn_put_call, "PUT call ") \ EM(rxrpc_conn_put_call_input, "PUT inp-call") \ EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \ EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \ EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \ EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \ EM(rxrpc_conn_put_poke, "PUT poke ") \ EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \ EM(rxrpc_conn_put_unbundle, "PUT unbundle") \ EM(rxrpc_conn_put_unidle, "PUT unidle ") \ EM(rxrpc_conn_put_work, "PUT work ") \ EM(rxrpc_conn_queue_challenge, "QUE chall ") \ EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \ EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \ EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \ EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \ E_(rxrpc_conn_see_work, "SEE work ") #define rxrpc_client_traces \ EM(rxrpc_client_activate_chans, "Activa") \ EM(rxrpc_client_alloc, "Alloc ") \ EM(rxrpc_client_chan_activate, "ChActv") \ EM(rxrpc_client_chan_disconnect, "ChDisc") \ EM(rxrpc_client_chan_pass, "ChPass") \ EM(rxrpc_client_cleanup, "Clean ") \ EM(rxrpc_client_discard, "Discar") \ EM(rxrpc_client_exposed, "Expose") \ EM(rxrpc_client_replace, "Replac") \ EM(rxrpc_client_queue_new_call, "Q-Call") \ EM(rxrpc_client_to_active, "->Actv") \ E_(rxrpc_client_to_idle, "->Idle") #define rxrpc_call_traces \ EM(rxrpc_call_get_io_thread, "GET iothread") \ EM(rxrpc_call_get_input, "GET input ") \ EM(rxrpc_call_get_kernel_service, "GET krnl-srv") \ 
EM(rxrpc_call_get_notify_socket, "GET notify ") \ EM(rxrpc_call_get_poke, "GET poke ") \ EM(rxrpc_call_get_recvmsg, "GET recvmsg ") \ EM(rxrpc_call_get_release_sock, "GET rel-sock") \ EM(rxrpc_call_get_sendmsg, "GET sendmsg ") \ EM(rxrpc_call_get_userid, "GET user-id ") \ EM(rxrpc_call_new_client, "NEW client ") \ EM(rxrpc_call_new_prealloc_service, "NEW prealloc") \ EM(rxrpc_call_put_discard_prealloc, "PUT disc-pre") \ EM(rxrpc_call_put_discard_error, "PUT disc-err") \ EM(rxrpc_call_put_io_thread, "PUT iothread") \ EM(rxrpc_call_put_input, "PUT input ") \ EM(rxrpc_call_put_kernel, "PUT kernel ") \ EM(rxrpc_call_put_poke, "PUT poke ") \ EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \ EM(rxrpc_call_put_release_sock, "PUT rls-sock") \ EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \ EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \ EM(rxrpc_call_put_unnotify, "PUT unnotify") \ EM(rxrpc_call_put_userid_exists, "PUT u-exists") \ EM(rxrpc_call_put_userid, "PUT user-id ") \ EM(rxrpc_call_see_accept, "SEE accept ") \ EM(rxrpc_call_see_activate_client, "SEE act-clnt") \ EM(rxrpc_call_see_connect_failed, "SEE con-fail") \ EM(rxrpc_call_see_connected, "SEE connect ") \ EM(rxrpc_call_see_disconnected, "SEE disconn ") \ EM(rxrpc_call_see_distribute_error, "SEE dist-err") \ EM(rxrpc_call_see_input, "SEE input ") \ EM(rxrpc_call_see_release, "SEE release ") \ EM(rxrpc_call_see_userid_exists, "SEE u-exists") \ E_(rxrpc_call_see_zap, "SEE zap ") #define rxrpc_txqueue_traces \ EM(rxrpc_txqueue_await_reply, "AWR") \ EM(rxrpc_txqueue_dequeue, "DEQ") \ EM(rxrpc_txqueue_end, "END") \ EM(rxrpc_txqueue_queue, "QUE") \ EM(rxrpc_txqueue_queue_last, "QLS") \ EM(rxrpc_txqueue_rotate, "ROT") \ EM(rxrpc_txqueue_rotate_last, "RLS") \ E_(rxrpc_txqueue_wait, "WAI") #define rxrpc_receive_traces \ EM(rxrpc_receive_end, "END") \ EM(rxrpc_receive_front, "FRN") \ EM(rxrpc_receive_incoming, "INC") \ EM(rxrpc_receive_queue, "QUE") \ EM(rxrpc_receive_queue_last, "QLS") \ EM(rxrpc_receive_queue_oos, "QUO") \ EM(rxrpc_receive_queue_oos_last, "QOL") \ EM(rxrpc_receive_oos, "OOS") \ EM(rxrpc_receive_oos_last, "OSL") \ EM(rxrpc_receive_rotate, "ROT") \ E_(rxrpc_receive_rotate_last, "RLS") #define rxrpc_recvmsg_traces \ EM(rxrpc_recvmsg_cont, "CONT") \ EM(rxrpc_recvmsg_data_return, "DATA") \ EM(rxrpc_recvmsg_dequeue, "DEQU") \ EM(rxrpc_recvmsg_enter, "ENTR") \ EM(rxrpc_recvmsg_full, "FULL") \ EM(rxrpc_recvmsg_hole, "HOLE") \ EM(rxrpc_recvmsg_next, "NEXT") \ EM(rxrpc_recvmsg_requeue, "REQU") \ EM(rxrpc_recvmsg_return, "RETN") \ EM(rxrpc_recvmsg_terminal, "TERM") \ EM(rxrpc_recvmsg_to_be_accepted, "TBAC") \ EM(rxrpc_recvmsg_unqueue, "UNQU") \ E_(rxrpc_recvmsg_wait, "WAIT") #define rxrpc_rtt_tx_traces \ EM(rxrpc_rtt_tx_cancel, "CNCE") \ EM(rxrpc_rtt_tx_data, "DATA") \ EM(rxrpc_rtt_tx_no_slot, "FULL") \ E_(rxrpc_rtt_tx_ping, "PING") #define rxrpc_rtt_rx_traces \ EM(rxrpc_rtt_rx_other_ack, "OACK") \ EM(rxrpc_rtt_rx_obsolete, "OBSL") \ EM(rxrpc_rtt_rx_lost, "LOST") \ EM(rxrpc_rtt_rx_ping_response, "PONG") \ E_(rxrpc_rtt_rx_requested_ack, "RACK") #define rxrpc_timer_traces \ EM(rxrpc_timer_begin, "Begin ") \ EM(rxrpc_timer_exp_ack, "ExpAck") \ EM(rxrpc_timer_exp_hard, "ExpHrd") \ EM(rxrpc_timer_exp_idle, "ExpIdl") \ EM(rxrpc_timer_exp_keepalive, "ExpKA ") \ EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \ EM(rxrpc_timer_exp_normal, "ExpNml") \ EM(rxrpc_timer_exp_ping, "ExpPng") \ EM(rxrpc_timer_exp_resend, "ExpRsn") \ EM(rxrpc_timer_init_for_reply, "IniRpl") \ EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ EM(rxrpc_timer_restart, "Restrt") \ 
EM(rxrpc_timer_set_for_ack, "SetAck") \ EM(rxrpc_timer_set_for_hard, "SetHrd") \ EM(rxrpc_timer_set_for_idle, "SetIdl") \ EM(rxrpc_timer_set_for_keepalive, "KeepAl") \ EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \ EM(rxrpc_timer_set_for_normal, "SetNml") \ EM(rxrpc_timer_set_for_ping, "SetPng") \ EM(rxrpc_timer_set_for_resend, "SetRTx") \ E_(rxrpc_timer_set_for_send, "SetSnd") #define rxrpc_propose_ack_traces \ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ EM(rxrpc_propose_ack_input_data, "DataIn ") \ EM(rxrpc_propose_ack_input_data_hole, "DataInH") \ EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ EM(rxrpc_propose_ack_ping_for_old_rtt, "OldRtt ") \ EM(rxrpc_propose_ack_ping_for_params, "Params ") \ EM(rxrpc_propose_ack_ping_for_rtt, "Rtt ") \ EM(rxrpc_propose_ack_processing_op, "ProcOp ") \ EM(rxrpc_propose_ack_respond_to_ack, "Rsp2Ack") \ EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \ EM(rxrpc_propose_ack_retry_tx, "RetryTx") \ EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \ EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \ E_(rxrpc_propose_ack_terminal_ack, "ClTerm ") #define rxrpc_congest_modes \ EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \ EM(RXRPC_CALL_FAST_RETRANSMIT, "FastReTx ") \ EM(RXRPC_CALL_PACKET_LOSS, "PktLoss ") \ E_(RXRPC_CALL_SLOW_START, "SlowStart") #define rxrpc_congest_changes \ EM(rxrpc_cong_begin_retransmission, " Retrans") \ EM(rxrpc_cong_cleared_nacks, " Cleared") \ EM(rxrpc_cong_new_low_nack, " NewLowN") \ EM(rxrpc_cong_no_change, " -") \ EM(rxrpc_cong_progress, " Progres") \ EM(rxrpc_cong_idle_reset, " IdleRes") \ EM(rxrpc_cong_retransmit_again, " ReTxAgn") \ EM(rxrpc_cong_rtt_window_end, " RttWinE") \ E_(rxrpc_cong_saw_nack, " SawNack") #define rxrpc_pkts \ EM(0, "?00") \ EM(RXRPC_PACKET_TYPE_DATA, "DATA") \ EM(RXRPC_PACKET_TYPE_ACK, "ACK") \ EM(RXRPC_PACKET_TYPE_BUSY, "BUSY") \ EM(RXRPC_PACKET_TYPE_ABORT, "ABORT") \ EM(RXRPC_PACKET_TYPE_ACKALL, "ACKALL") \ EM(RXRPC_PACKET_TYPE_CHALLENGE, "CHALL") \ EM(RXRPC_PACKET_TYPE_RESPONSE, "RESP") \ EM(RXRPC_PACKET_TYPE_DEBUG, "DEBUG") \ EM(9, "?09") \ EM(10, "?10") \ EM(11, "?11") \ EM(12, "?12") \ EM(RXRPC_PACKET_TYPE_VERSION, "VERSION") \ EM(14, "?14") \ E_(15, "?15") #define rxrpc_ack_names \ EM(0, "-0-") \ EM(RXRPC_ACK_REQUESTED, "REQ") \ EM(RXRPC_ACK_DUPLICATE, "DUP") \ EM(RXRPC_ACK_OUT_OF_SEQUENCE, "OOS") \ EM(RXRPC_ACK_EXCEEDS_WINDOW, "WIN") \ EM(RXRPC_ACK_NOSPACE, "MEM") \ EM(RXRPC_ACK_PING, "PNG") \ EM(RXRPC_ACK_PING_RESPONSE, "PNR") \ EM(RXRPC_ACK_DELAY, "DLY") \ EM(RXRPC_ACK_IDLE, "IDL") \ E_(RXRPC_ACK__INVALID, "-?-") #define rxrpc_sack_traces \ EM(rxrpc_sack_advance, "ADV") \ EM(rxrpc_sack_fill, "FIL") \ EM(rxrpc_sack_nack, "NAK") \ EM(rxrpc_sack_none, "---") \ E_(rxrpc_sack_oos, "OOS") #define rxrpc_completions \ EM(RXRPC_CALL_SUCCEEDED, "Succeeded") \ EM(RXRPC_CALL_REMOTELY_ABORTED, "RemoteAbort") \ EM(RXRPC_CALL_LOCALLY_ABORTED, "LocalAbort") \ EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \ E_(RXRPC_CALL_NETWORK_ERROR, "NetError") #define rxrpc_tx_points \ EM(rxrpc_tx_point_call_abort, "CallAbort") \ EM(rxrpc_tx_point_call_ack, "CallAck") \ EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \ EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \ EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \ EM(rxrpc_tx_point_conn_abort, "ConnAbort") \ EM(rxrpc_tx_point_reject, "Reject") \ EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \ EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \ 
	EM(rxrpc_tx_point_version_keepalive,	"VerKeepalive") \
	E_(rxrpc_tx_point_version_reply,	"VerReply")

#define rxrpc_req_ack_traces \
	EM(rxrpc_reqack_ack_lost,		"ACK-LOST ")	\
	EM(rxrpc_reqack_already_on,		"ALREADY-ON")	\
	EM(rxrpc_reqack_more_rtt,		"MORE-RTT ")	\
	EM(rxrpc_reqack_no_srv_last,		"NO-SRVLAST")	\
	EM(rxrpc_reqack_old_rtt,		"OLD-RTT ")	\
	EM(rxrpc_reqack_retrans,		"RETRANS ")	\
	EM(rxrpc_reqack_slow_start,		"SLOW-START")	\
	E_(rxrpc_reqack_small_txwin,		"SMALL-TXWN")
/* ---- Must update size of stat_why_req_ack[] if more are added! */

#define rxrpc_txbuf_traces \
	EM(rxrpc_txbuf_alloc_ack,		"ALLOC ACK ")	\
	EM(rxrpc_txbuf_alloc_data,		"ALLOC DATA ")	\
	EM(rxrpc_txbuf_free,			"FREE ")	\
	EM(rxrpc_txbuf_get_buffer,		"GET BUFFER ")	\
	EM(rxrpc_txbuf_get_trans,		"GET TRANS ")	\
	EM(rxrpc_txbuf_get_retrans,		"GET RETRANS")	\
	EM(rxrpc_txbuf_put_ack_tx,		"PUT ACK TX ")	\
	EM(rxrpc_txbuf_put_cleaned,		"PUT CLEANED")	\
	EM(rxrpc_txbuf_put_nomem,		"PUT NOMEM ")	\
	EM(rxrpc_txbuf_put_rotated,		"PUT ROTATED")	\
	EM(rxrpc_txbuf_put_send_aborted,	"PUT SEND-X ")	\
	EM(rxrpc_txbuf_put_trans,		"PUT TRANS ")	\
	EM(rxrpc_txbuf_see_out_of_step,		"OUT-OF-STEP")	\
	EM(rxrpc_txbuf_see_send_more,		"SEE SEND+ ")	\
	E_(rxrpc_txbuf_see_unacked,		"SEE UNACKED")

/*
 * Generate enums for tracing information.  (A standalone sketch of this
 * EM()/E_() expansion pattern appears after the end of this header.)
 */
#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY

#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a

enum rxrpc_abort_reason		{ rxrpc_abort_reasons } __mode(byte);
enum rxrpc_bundle_trace		{ rxrpc_bundle_traces } __mode(byte);
enum rxrpc_call_poke_trace	{ rxrpc_call_poke_traces } __mode(byte);
enum rxrpc_call_trace		{ rxrpc_call_traces } __mode(byte);
enum rxrpc_client_trace		{ rxrpc_client_traces } __mode(byte);
enum rxrpc_congest_change	{ rxrpc_congest_changes } __mode(byte);
enum rxrpc_conn_trace		{ rxrpc_conn_traces } __mode(byte);
enum rxrpc_local_trace		{ rxrpc_local_traces } __mode(byte);
enum rxrpc_peer_trace		{ rxrpc_peer_traces } __mode(byte);
enum rxrpc_propose_ack_trace	{ rxrpc_propose_ack_traces } __mode(byte);
enum rxrpc_receive_trace	{ rxrpc_receive_traces } __mode(byte);
enum rxrpc_recvmsg_trace	{ rxrpc_recvmsg_traces } __mode(byte);
enum rxrpc_req_ack_trace	{ rxrpc_req_ack_traces } __mode(byte);
enum rxrpc_rtt_rx_trace		{ rxrpc_rtt_rx_traces } __mode(byte);
enum rxrpc_rtt_tx_trace		{ rxrpc_rtt_tx_traces } __mode(byte);
enum rxrpc_sack_trace		{ rxrpc_sack_traces } __mode(byte);
enum rxrpc_skb_trace		{ rxrpc_skb_traces } __mode(byte);
enum rxrpc_timer_trace		{ rxrpc_timer_traces } __mode(byte);
enum rxrpc_tx_point		{ rxrpc_tx_points } __mode(byte);
enum rxrpc_txbuf_trace		{ rxrpc_txbuf_traces } __mode(byte);
enum rxrpc_txqueue_trace	{ rxrpc_txqueue_traces } __mode(byte);

#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */

/*
 * Export enum symbols to userspace.
*/ #undef EM #undef E_ #ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS #define EM(a, b) TRACE_DEFINE_ENUM(a); #define E_(a, b) TRACE_DEFINE_ENUM(a); rxrpc_abort_reasons; rxrpc_bundle_traces; rxrpc_call_poke_traces; rxrpc_call_traces; rxrpc_client_traces; rxrpc_congest_changes; rxrpc_congest_modes; rxrpc_conn_traces; rxrpc_local_traces; rxrpc_propose_ack_traces; rxrpc_receive_traces; rxrpc_recvmsg_traces; rxrpc_req_ack_traces; rxrpc_rtt_rx_traces; rxrpc_rtt_tx_traces; rxrpc_sack_traces; rxrpc_skb_traces; rxrpc_timer_traces; rxrpc_tx_points; rxrpc_txbuf_traces; rxrpc_txqueue_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that * will be printed in the output. */ #undef EM #undef E_ #define EM(a, b) { a, b }, #define E_(a, b) { a, b } TRACE_EVENT(rxrpc_local, TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op, int ref, int usage), TP_ARGS(local_debug_id, op, ref, usage), TP_STRUCT__entry( __field(unsigned int, local) __field(int, op) __field(int, ref) __field(int, usage) ), TP_fast_assign( __entry->local = local_debug_id; __entry->op = op; __entry->ref = ref; __entry->usage = usage; ), TP_printk("L=%08x %s r=%d u=%d", __entry->local, __print_symbolic(__entry->op, rxrpc_local_traces), __entry->ref, __entry->usage) ); TRACE_EVENT(rxrpc_peer, TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why), TP_ARGS(peer_debug_id, ref, why), TP_STRUCT__entry( __field(unsigned int, peer) __field(int, ref) __field(enum rxrpc_peer_trace, why) ), TP_fast_assign( __entry->peer = peer_debug_id; __entry->ref = ref; __entry->why = why; ), TP_printk("P=%08x %s r=%d", __entry->peer, __print_symbolic(__entry->why, rxrpc_peer_traces), __entry->ref) ); TRACE_EVENT(rxrpc_bundle, TP_PROTO(unsigned int bundle_debug_id, int ref, enum rxrpc_bundle_trace why), TP_ARGS(bundle_debug_id, ref, why), TP_STRUCT__entry( __field(unsigned int, bundle) __field(int, ref) __field(int, why) ), TP_fast_assign( __entry->bundle = bundle_debug_id; __entry->ref = ref; __entry->why = why; ), TP_printk("CB=%08x %s r=%d", __entry->bundle, __print_symbolic(__entry->why, rxrpc_bundle_traces), __entry->ref) ); TRACE_EVENT(rxrpc_conn, TP_PROTO(unsigned int conn_debug_id, int ref, enum rxrpc_conn_trace why), TP_ARGS(conn_debug_id, ref, why), TP_STRUCT__entry( __field(unsigned int, conn) __field(int, ref) __field(int, why) ), TP_fast_assign( __entry->conn = conn_debug_id; __entry->ref = ref; __entry->why = why; ), TP_printk("C=%08x %s r=%d", __entry->conn, __print_symbolic(__entry->why, rxrpc_conn_traces), __entry->ref) ); TRACE_EVENT(rxrpc_client, TP_PROTO(struct rxrpc_connection *conn, int channel, enum rxrpc_client_trace op), TP_ARGS(conn, channel, op), TP_STRUCT__entry( __field(unsigned int, conn) __field(u32, cid) __field(int, channel) __field(int, usage) __field(enum rxrpc_client_trace, op) ), TP_fast_assign( __entry->conn = conn ? conn->debug_id : 0; __entry->channel = channel; __entry->usage = conn ? refcount_read(&conn->ref) : -2; __entry->op = op; __entry->cid = conn ? 
conn->proto.cid : 0; ), TP_printk("C=%08x h=%2d %s i=%08x u=%d", __entry->conn, __entry->channel, __print_symbolic(__entry->op, rxrpc_client_traces), __entry->cid, __entry->usage) ); TRACE_EVENT(rxrpc_call, TP_PROTO(unsigned int call_debug_id, int ref, unsigned long aux, enum rxrpc_call_trace why), TP_ARGS(call_debug_id, ref, aux, why), TP_STRUCT__entry( __field(unsigned int, call) __field(int, ref) __field(int, why) __field(unsigned long, aux) ), TP_fast_assign( __entry->call = call_debug_id; __entry->ref = ref; __entry->why = why; __entry->aux = aux; ), TP_printk("c=%08x %s r=%d a=%lx", __entry->call, __print_symbolic(__entry->why, rxrpc_call_traces), __entry->ref, __entry->aux) ); TRACE_EVENT(rxrpc_skb, TP_PROTO(struct sk_buff *skb, int usage, int mod_count, enum rxrpc_skb_trace why), TP_ARGS(skb, usage, mod_count, why), TP_STRUCT__entry( __field(struct sk_buff *, skb) __field(int, usage) __field(int, mod_count) __field(enum rxrpc_skb_trace, why) ), TP_fast_assign( __entry->skb = skb; __entry->usage = usage; __entry->mod_count = mod_count; __entry->why = why; ), TP_printk("s=%p Rx %s u=%d m=%d", __entry->skb, __print_symbolic(__entry->why, rxrpc_skb_traces), __entry->usage, __entry->mod_count) ); TRACE_EVENT(rxrpc_rx_packet, TP_PROTO(struct rxrpc_skb_priv *sp), TP_ARGS(sp), TP_STRUCT__entry( __field_struct(struct rxrpc_host_header, hdr) ), TP_fast_assign( memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr)); ), TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s", __entry->hdr.epoch, __entry->hdr.cid, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.securityIndex, __entry->hdr.flags, __print_symbolic(__entry->hdr.type, rxrpc_pkts)) ); TRACE_EVENT(rxrpc_rx_done, TP_PROTO(int result, int abort_code), TP_ARGS(result, abort_code), TP_STRUCT__entry( __field(int, result) __field(int, abort_code) ), TP_fast_assign( __entry->result = result; __entry->abort_code = abort_code; ), TP_printk("r=%d a=%d", __entry->result, __entry->abort_code) ); TRACE_EVENT(rxrpc_abort, TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why, u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error), TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error), TP_STRUCT__entry( __field(unsigned int, call_nr) __field(enum rxrpc_abort_reason, why) __field(u32, cid) __field(u32, call_id) __field(rxrpc_seq_t, seq) __field(int, abort_code) __field(int, error) ), TP_fast_assign( __entry->call_nr = call_nr; __entry->why = why; __entry->cid = cid; __entry->call_id = call_id; __entry->abort_code = abort_code; __entry->error = error; __entry->seq = seq; ), TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s", __entry->call_nr, __entry->cid, __entry->call_id, __entry->seq, __entry->abort_code, __entry->error, __print_symbolic(__entry->why, rxrpc_abort_reasons)) ); TRACE_EVENT(rxrpc_call_complete, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_call_completion, compl) __field(int, error) __field(u32, abort_code) ), TP_fast_assign( __entry->call = call->debug_id; __entry->compl = call->completion; __entry->error = call->error; __entry->abort_code = call->abort_code; ), TP_printk("c=%08x %s r=%d ac=%d", __entry->call, __print_symbolic(__entry->compl, rxrpc_completions), __entry->error, __entry->abort_code) ); TRACE_EVENT(rxrpc_txqueue, TP_PROTO(struct rxrpc_call *call, enum rxrpc_txqueue_trace why), TP_ARGS(call, why), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_txqueue_trace, why) 
__field(rxrpc_seq_t, acks_hard_ack) __field(rxrpc_seq_t, tx_bottom) __field(rxrpc_seq_t, tx_top) __field(rxrpc_seq_t, tx_prepared) __field(int, tx_winsize) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->acks_hard_ack = call->acks_hard_ack; __entry->tx_bottom = call->tx_bottom; __entry->tx_top = call->tx_top; __entry->tx_prepared = call->tx_prepared; __entry->tx_winsize = call->tx_winsize; ), TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u", __entry->call, __print_symbolic(__entry->why, rxrpc_txqueue_traces), __entry->tx_bottom, __entry->acks_hard_ack, __entry->tx_top - __entry->tx_bottom, __entry->tx_top - __entry->acks_hard_ack, __entry->tx_prepared - __entry->tx_bottom, __entry->tx_winsize) ); TRACE_EVENT(rxrpc_rx_data, TP_PROTO(unsigned int call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags), TP_ARGS(call, seq, serial, flags), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_seq_t, seq) __field(rxrpc_serial_t, serial) __field(u8, flags) ), TP_fast_assign( __entry->call = call; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; ), TP_printk("c=%08x DATA %08x q=%08x fl=%02x", __entry->call, __entry->serial, __entry->seq, __entry->flags) ); TRACE_EVENT(rxrpc_rx_ack, TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, rxrpc_serial_t ack_serial, rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks), TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_serial_t, serial) __field(rxrpc_serial_t, ack_serial) __field(rxrpc_seq_t, first) __field(rxrpc_seq_t, prev) __field(u8, reason) __field(u8, n_acks) ), TP_fast_assign( __entry->call = call->debug_id; __entry->serial = serial; __entry->ack_serial = ack_serial; __entry->first = first; __entry->prev = prev; __entry->reason = reason; __entry->n_acks = n_acks; ), TP_printk("c=%08x %08x %s r=%08x f=%08x p=%08x n=%u", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_serial, __entry->first, __entry->prev, __entry->n_acks) ); TRACE_EVENT(rxrpc_rx_abort, TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, u32 abort_code), TP_ARGS(call, serial, abort_code), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_serial_t, serial) __field(u32, abort_code) ), TP_fast_assign( __entry->call = call->debug_id; __entry->serial = serial; __entry->abort_code = abort_code; ), TP_printk("c=%08x ABORT %08x ac=%d", __entry->call, __entry->serial, __entry->abort_code) ); TRACE_EVENT(rxrpc_rx_challenge, TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial, u32 version, u32 nonce, u32 min_level), TP_ARGS(conn, serial, version, nonce, min_level), TP_STRUCT__entry( __field(unsigned int, conn) __field(rxrpc_serial_t, serial) __field(u32, version) __field(u32, nonce) __field(u32, min_level) ), TP_fast_assign( __entry->conn = conn->debug_id; __entry->serial = serial; __entry->version = version; __entry->nonce = nonce; __entry->min_level = min_level; ), TP_printk("C=%08x CHALLENGE %08x v=%x n=%x ml=%x", __entry->conn, __entry->serial, __entry->version, __entry->nonce, __entry->min_level) ); TRACE_EVENT(rxrpc_rx_response, TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial, u32 version, u32 kvno, u32 ticket_len), TP_ARGS(conn, serial, version, kvno, ticket_len), TP_STRUCT__entry( __field(unsigned int, conn) __field(rxrpc_serial_t, serial) __field(u32, version) __field(u32, kvno) __field(u32, ticket_len) ), TP_fast_assign( __entry->conn = conn->debug_id; 
__entry->serial = serial; __entry->version = version; __entry->kvno = kvno; __entry->ticket_len = ticket_len; ), TP_printk("C=%08x RESPONSE %08x v=%x kvno=%x tl=%x", __entry->conn, __entry->serial, __entry->version, __entry->kvno, __entry->ticket_len) ); TRACE_EVENT(rxrpc_rx_rwind_change, TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, u32 rwind, bool wake), TP_ARGS(call, serial, rwind, wake), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_serial_t, serial) __field(u32, rwind) __field(bool, wake) ), TP_fast_assign( __entry->call = call->debug_id; __entry->serial = serial; __entry->rwind = rwind; __entry->wake = wake; ), TP_printk("c=%08x %08x rw=%u%s", __entry->call, __entry->serial, __entry->rwind, __entry->wake ? " wake" : "") ); TRACE_EVENT(rxrpc_tx_packet, TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr, enum rxrpc_tx_point where), TP_ARGS(call_id, whdr, where), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_tx_point, where) __field_struct(struct rxrpc_wire_header, whdr) ), TP_fast_assign( __entry->call = call_id; memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr)); __entry->where = where; ), TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s", __entry->call, ntohl(__entry->whdr.epoch), ntohl(__entry->whdr.cid), ntohl(__entry->whdr.callNumber), ntohs(__entry->whdr.serviceId), ntohl(__entry->whdr.serial), ntohl(__entry->whdr.seq), __entry->whdr.type, __entry->whdr.flags, __entry->whdr.type <= 15 ? __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK", __print_symbolic(__entry->where, rxrpc_tx_points)) ); TRACE_EVENT(rxrpc_tx_data, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, rxrpc_serial_t serial, u8 flags, bool retrans, bool lose), TP_ARGS(call, seq, serial, flags, retrans, lose), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_seq_t, seq) __field(rxrpc_serial_t, serial) __field(u32, cid) __field(u32, call_id) __field(u8, flags) __field(bool, retrans) __field(bool, lose) ), TP_fast_assign( __entry->call = call->debug_id; __entry->cid = call->cid; __entry->call_id = call->call_id; __entry->seq = seq; __entry->serial = serial; __entry->flags = flags; __entry->retrans = retrans; __entry->lose = lose; ), TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s", __entry->call, __entry->cid, __entry->call_id, __entry->serial, __entry->seq, __entry->flags, __entry->retrans ? " *RETRANS*" : "", __entry->lose ? 
" *LOSE*" : "") ); TRACE_EVENT(rxrpc_tx_ack, TP_PROTO(unsigned int call, rxrpc_serial_t serial, rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial, u8 reason, u8 n_acks, u16 rwind), TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks, rwind), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_serial_t, serial) __field(rxrpc_seq_t, ack_first) __field(rxrpc_serial_t, ack_serial) __field(u8, reason) __field(u8, n_acks) __field(u16, rwind) ), TP_fast_assign( __entry->call = call; __entry->serial = serial; __entry->ack_first = ack_first; __entry->ack_serial = ack_serial; __entry->reason = reason; __entry->n_acks = n_acks; __entry->rwind = rwind; ), TP_printk(" c=%08x ACK %08x %s f=%08x r=%08x n=%u rw=%u", __entry->call, __entry->serial, __print_symbolic(__entry->reason, rxrpc_ack_names), __entry->ack_first, __entry->ack_serial, __entry->n_acks, __entry->rwind) ); TRACE_EVENT(rxrpc_receive, TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why, rxrpc_serial_t serial, rxrpc_seq_t seq), TP_ARGS(call, why, serial, seq), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_receive_trace, why) __field(rxrpc_serial_t, serial) __field(rxrpc_seq_t, seq) __field(rxrpc_seq_t, window) __field(rxrpc_seq_t, wtop) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->seq = seq; __entry->window = call->ackr_window; __entry->wtop = call->ackr_wtop; ), TP_printk("c=%08x %s r=%08x q=%08x w=%08x-%08x", __entry->call, __print_symbolic(__entry->why, rxrpc_receive_traces), __entry->serial, __entry->seq, __entry->window, __entry->wtop) ); TRACE_EVENT(rxrpc_recvmsg, TP_PROTO(unsigned int call_debug_id, enum rxrpc_recvmsg_trace why, int ret), TP_ARGS(call_debug_id, why, ret), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_recvmsg_trace, why) __field(int, ret) ), TP_fast_assign( __entry->call = call_debug_id; __entry->why = why; __entry->ret = ret; ), TP_printk("c=%08x %s ret=%d", __entry->call, __print_symbolic(__entry->why, rxrpc_recvmsg_traces), __entry->ret) ); TRACE_EVENT(rxrpc_recvdata, TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why, rxrpc_seq_t seq, unsigned int offset, unsigned int len, int ret), TP_ARGS(call, why, seq, offset, len, ret), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_recvmsg_trace, why) __field(rxrpc_seq_t, seq) __field(unsigned int, offset) __field(unsigned int, len) __field(int, ret) ), TP_fast_assign( __entry->call = call ? 
call->debug_id : 0; __entry->why = why; __entry->seq = seq; __entry->offset = offset; __entry->len = len; __entry->ret = ret; ), TP_printk("c=%08x %s q=%08x o=%u l=%u ret=%d", __entry->call, __print_symbolic(__entry->why, rxrpc_recvmsg_traces), __entry->seq, __entry->offset, __entry->len, __entry->ret) ); TRACE_EVENT(rxrpc_rtt_tx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why, int slot, rxrpc_serial_t send_serial), TP_ARGS(call, why, slot, send_serial), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_rtt_tx_trace, why) __field(int, slot) __field(rxrpc_serial_t, send_serial) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->slot = slot; __entry->send_serial = send_serial; ), TP_printk("c=%08x [%d] %s sr=%08x", __entry->call, __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_tx_traces), __entry->send_serial) ); TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, int slot, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, u32 rtt, u32 rto), TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_rtt_rx_trace, why) __field(int, slot) __field(rxrpc_serial_t, send_serial) __field(rxrpc_serial_t, resp_serial) __field(u32, rtt) __field(u32, rto) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->slot = slot; __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; __entry->rto = rto; ), TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u", __entry->call, __entry->slot, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, __entry->rto) ); TRACE_EVENT(rxrpc_timer, TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why, unsigned long now), TP_ARGS(call, why, now), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_timer_trace, why) __field(long, now) __field(long, ack_at) __field(long, ack_lost_at) __field(long, resend_at) __field(long, ping_at) __field(long, expect_rx_by) __field(long, expect_req_by) __field(long, expect_term_by) __field(long, timer) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->now = now; __entry->ack_at = call->delay_ack_at; __entry->ack_lost_at = call->ack_lost_at; __entry->resend_at = call->resend_at; __entry->expect_rx_by = call->expect_rx_by; __entry->expect_req_by = call->expect_req_by; __entry->expect_term_by = call->expect_term_by; __entry->timer = call->timer.expires; ), TP_printk("c=%08x %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", __entry->call, __print_symbolic(__entry->why, rxrpc_timer_traces), __entry->ack_at - __entry->now, __entry->ack_lost_at - __entry->now, __entry->resend_at - __entry->now, __entry->expect_rx_by - __entry->now, __entry->expect_req_by - __entry->now, __entry->expect_term_by - __entry->now, __entry->timer - __entry->now) ); TRACE_EVENT(rxrpc_timer_expired, TP_PROTO(struct rxrpc_call *call, unsigned long now), TP_ARGS(call, now), TP_STRUCT__entry( __field(unsigned int, call) __field(long, now) __field(long, ack_at) __field(long, ack_lost_at) __field(long, resend_at) __field(long, ping_at) __field(long, expect_rx_by) __field(long, expect_req_by) __field(long, expect_term_by) __field(long, timer) ), TP_fast_assign( __entry->call = call->debug_id; __entry->now = now; __entry->ack_at = call->delay_ack_at; __entry->ack_lost_at = call->ack_lost_at; __entry->resend_at = call->resend_at; 
__entry->expect_rx_by = call->expect_rx_by; __entry->expect_req_by = call->expect_req_by; __entry->expect_term_by = call->expect_term_by; __entry->timer = call->timer.expires; ), TP_printk("c=%08x EXPIRED a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", __entry->call, __entry->ack_at - __entry->now, __entry->ack_lost_at - __entry->now, __entry->resend_at - __entry->now, __entry->expect_rx_by - __entry->now, __entry->expect_req_by - __entry->now, __entry->expect_term_by - __entry->now, __entry->timer - __entry->now) ); TRACE_EVENT(rxrpc_rx_lose, TP_PROTO(struct rxrpc_skb_priv *sp), TP_ARGS(sp), TP_STRUCT__entry( __field_struct(struct rxrpc_host_header, hdr) ), TP_fast_assign( memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr)); ), TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*", __entry->hdr.epoch, __entry->hdr.cid, __entry->hdr.callNumber, __entry->hdr.serviceId, __entry->hdr.serial, __entry->hdr.seq, __entry->hdr.type, __entry->hdr.flags, __entry->hdr.type <= 15 ? __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK") ); TRACE_EVENT(rxrpc_propose_ack, TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why, u8 ack_reason, rxrpc_serial_t serial), TP_ARGS(call, why, ack_reason, serial), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_propose_ack_trace, why) __field(rxrpc_serial_t, serial) __field(u8, ack_reason) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->ack_reason = ack_reason; ), TP_printk("c=%08x %s %s r=%08x", __entry->call, __print_symbolic(__entry->why, rxrpc_propose_ack_traces), __print_symbolic(__entry->ack_reason, rxrpc_ack_names), __entry->serial) ); TRACE_EVENT(rxrpc_send_ack, TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why, u8 ack_reason, rxrpc_serial_t serial), TP_ARGS(call, why, ack_reason, serial), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_propose_ack_trace, why) __field(rxrpc_serial_t, serial) __field(u8, ack_reason) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->ack_reason = ack_reason; ), TP_printk("c=%08x %s %s r=%08x", __entry->call, __print_symbolic(__entry->why, rxrpc_propose_ack_traces), __print_symbolic(__entry->ack_reason, rxrpc_ack_names), __entry->serial) ); TRACE_EVENT(rxrpc_drop_ack, TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why, u8 ack_reason, rxrpc_serial_t serial, bool nobuf), TP_ARGS(call, why, ack_reason, serial, nobuf), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_propose_ack_trace, why) __field(rxrpc_serial_t, serial) __field(u8, ack_reason) __field(bool, nobuf) ), TP_fast_assign( __entry->call = call->debug_id; __entry->why = why; __entry->serial = serial; __entry->ack_reason = ack_reason; __entry->nobuf = nobuf; ), TP_printk("c=%08x %s %s r=%08x nbf=%u", __entry->call, __print_symbolic(__entry->why, rxrpc_propose_ack_traces), __print_symbolic(__entry->ack_reason, rxrpc_ack_names), __entry->serial, __entry->nobuf) ); TRACE_EVENT(rxrpc_retransmit, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, s64 expiry), TP_ARGS(call, seq, expiry), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_seq_t, seq) __field(s64, expiry) ), TP_fast_assign( __entry->call = call->debug_id; __entry->seq = seq; __entry->expiry = expiry; ), TP_printk("c=%08x q=%x xp=%lld", __entry->call, __entry->seq, __entry->expiry) ); TRACE_EVENT(rxrpc_congest, TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary, 
rxrpc_serial_t ack_serial, enum rxrpc_congest_change change), TP_ARGS(call, summary, ack_serial, change), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_congest_change, change) __field(rxrpc_seq_t, hard_ack) __field(rxrpc_seq_t, top) __field(rxrpc_seq_t, lowest_nak) __field(rxrpc_serial_t, ack_serial) __field_struct(struct rxrpc_ack_summary, sum) ), TP_fast_assign( __entry->call = call->debug_id; __entry->change = change; __entry->hard_ack = call->acks_hard_ack; __entry->top = call->tx_top; __entry->lowest_nak = call->acks_lowest_nak; __entry->ack_serial = ack_serial; memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), TP_printk("c=%08x r=%08x %s q=%08x %s cw=%u ss=%u nA=%u,%u+%u,%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), __entry->hard_ack, __print_symbolic(__entry->sum.mode, rxrpc_congest_modes), __entry->sum.cwnd, __entry->sum.ssthresh, __entry->sum.nr_acks, __entry->sum.nr_retained_nacks, __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks, __entry->top - __entry->hard_ack, __entry->sum.cumulative_acks, __entry->sum.dup_acks, __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "", __print_symbolic(__entry->change, rxrpc_congest_changes), __entry->sum.retrans_timeo ? " rTxTo" : "") ); TRACE_EVENT(rxrpc_reset_cwnd, TP_PROTO(struct rxrpc_call *call, ktime_t now), TP_ARGS(call, now), TP_STRUCT__entry( __field(unsigned int, call) __field(enum rxrpc_congest_mode, mode) __field(unsigned short, cwnd) __field(unsigned short, extra) __field(rxrpc_seq_t, hard_ack) __field(rxrpc_seq_t, prepared) __field(ktime_t, since_last_tx) __field(bool, has_data) ), TP_fast_assign( __entry->call = call->debug_id; __entry->mode = call->cong_mode; __entry->cwnd = call->cong_cwnd; __entry->extra = call->cong_extra; __entry->hard_ack = call->acks_hard_ack; __entry->prepared = call->tx_prepared - call->tx_bottom; __entry->since_last_tx = ktime_sub(now, call->tx_last_sent); __entry->has_data = !list_empty(&call->tx_sendmsg); ), TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u", __entry->call, __entry->hard_ack, __print_symbolic(__entry->mode, rxrpc_congest_modes), __entry->cwnd, __entry->extra, __entry->prepared, ktime_to_ns(__entry->since_last_tx), __entry->has_data) ); TRACE_EVENT(rxrpc_disconnect_call, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, call) __field(u32, abort_code) ), TP_fast_assign( __entry->call = call->debug_id; __entry->abort_code = call->abort_code; ), TP_printk("c=%08x ab=%08x", __entry->call, __entry->abort_code) ); TRACE_EVENT(rxrpc_improper_term, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, call) __field(u32, abort_code) ), TP_fast_assign( __entry->call = call->debug_id; __entry->abort_code = call->abort_code; ), TP_printk("c=%08x ab=%08x", __entry->call, __entry->abort_code) ); TRACE_EVENT(rxrpc_connect_call, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, call) __field(unsigned long, user_call_ID) __field(u32, cid) __field(u32, call_id) __field_struct(struct sockaddr_rxrpc, srx) ), TP_fast_assign( __entry->call = call->debug_id; __entry->user_call_ID = call->user_call_ID; __entry->cid = call->cid; __entry->call_id = call->call_id; __entry->srx = call->dest_srx; ), TP_printk("c=%08x u=%p %08x:%08x dst=%pISp", __entry->call, (void *)__entry->user_call_ID, __entry->cid, __entry->call_id, &__entry->srx.transport) ); TRACE_EVENT(rxrpc_resend, 
TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack), TP_ARGS(call, ack), TP_STRUCT__entry( __field(unsigned int, call) __field(rxrpc_seq_t, seq) __field(rxrpc_seq_t, transmitted) __field(rxrpc_serial_t, ack_serial) ), TP_fast_assign( struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL; __entry->call = call->debug_id; __entry->seq = call->acks_hard_ack; __entry->transmitted = call->tx_transmitted; __entry->ack_serial = sp ? sp->hdr.serial : 0; ), TP_printk("c=%08x r=%x q=%x tq=%x", __entry->call, __entry->ack_serial, __entry->seq, __entry->transmitted) ); TRACE_EVENT(rxrpc_rx_icmp, TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee, struct sockaddr_rxrpc *srx), TP_ARGS(peer, ee, srx), TP_STRUCT__entry( __field(unsigned int, peer) __field_struct(struct sock_extended_err, ee) __field_struct(struct sockaddr_rxrpc, srx) ), TP_fast_assign( __entry->peer = peer->debug_id; memcpy(&__entry->ee, ee, sizeof(__entry->ee)); memcpy(&__entry->srx, srx, sizeof(__entry->srx)); ), TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp", __entry->peer, __entry->ee.ee_origin, __entry->ee.ee_type, __entry->ee.ee_code, __entry->ee.ee_info, __entry->ee.ee_data, __entry->ee.ee_errno, &__entry->srx.transport) ); TRACE_EVENT(rxrpc_tx_fail, TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret, enum rxrpc_tx_point where), TP_ARGS(debug_id, serial, ret, where), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(rxrpc_serial_t, serial) __field(int, ret) __field(enum rxrpc_tx_point, where) ), TP_fast_assign( __entry->debug_id = debug_id; __entry->serial = serial; __entry->ret = ret; __entry->where = where; ), TP_printk("c=%08x r=%x ret=%d %s", __entry->debug_id, __entry->serial, __entry->ret, __print_symbolic(__entry->where, rxrpc_tx_points)) ); TRACE_EVENT(rxrpc_call_reset, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(u32, cid) __field(u32, call_id) __field(rxrpc_serial_t, call_serial) __field(rxrpc_serial_t, conn_serial) __field(rxrpc_seq_t, tx_seq) __field(rxrpc_seq_t, rx_seq) ), TP_fast_assign( __entry->debug_id = call->debug_id; __entry->cid = call->cid; __entry->call_id = call->call_id; __entry->call_serial = call->rx_serial; __entry->conn_serial = call->conn->hi_serial; __entry->tx_seq = call->acks_hard_ack; __entry->rx_seq = call->rx_highest_seq; ), TP_printk("c=%08x %08x:%08x r=%08x/%08x tx=%08x rx=%08x", __entry->debug_id, __entry->cid, __entry->call_id, __entry->call_serial, __entry->conn_serial, __entry->tx_seq, __entry->rx_seq) ); TRACE_EVENT(rxrpc_notify_socket, TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial), TP_ARGS(debug_id, serial), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(rxrpc_serial_t, serial) ), TP_fast_assign( __entry->debug_id = debug_id; __entry->serial = serial; ), TP_printk("c=%08x r=%08x", __entry->debug_id, __entry->serial) ); TRACE_EVENT(rxrpc_rx_discard_ack, TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, prev_pkt, call_ackr_prev), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(rxrpc_serial_t, serial) __field(rxrpc_seq_t, first_soft_ack) __field(rxrpc_seq_t, call_ackr_first) __field(rxrpc_seq_t, prev_pkt) __field(rxrpc_seq_t, call_ackr_prev) ), TP_fast_assign( __entry->debug_id = debug_id; __entry->serial = serial; __entry->first_soft_ack = first_soft_ack; __entry->call_ackr_first = 
call_ackr_first; __entry->prev_pkt = prev_pkt; __entry->call_ackr_prev = call_ackr_prev; ), TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", __entry->debug_id, __entry->serial, __entry->first_soft_ack, __entry->call_ackr_first, __entry->prev_pkt, __entry->call_ackr_prev) ); TRACE_EVENT(rxrpc_req_ack, TP_PROTO(unsigned int call_debug_id, rxrpc_seq_t seq, enum rxrpc_req_ack_trace why), TP_ARGS(call_debug_id, seq, why), TP_STRUCT__entry( __field(unsigned int, call_debug_id) __field(rxrpc_seq_t, seq) __field(enum rxrpc_req_ack_trace, why) ), TP_fast_assign( __entry->call_debug_id = call_debug_id; __entry->seq = seq; __entry->why = why; ), TP_printk("c=%08x q=%08x REQ-%s", __entry->call_debug_id, __entry->seq, __print_symbolic(__entry->why, rxrpc_req_ack_traces)) ); TRACE_EVENT(rxrpc_txbuf, TP_PROTO(unsigned int debug_id, unsigned int call_debug_id, rxrpc_seq_t seq, int ref, enum rxrpc_txbuf_trace what), TP_ARGS(debug_id, call_debug_id, seq, ref, what), TP_STRUCT__entry( __field(unsigned int, debug_id) __field(unsigned int, call_debug_id) __field(rxrpc_seq_t, seq) __field(int, ref) __field(enum rxrpc_txbuf_trace, what) ), TP_fast_assign( __entry->debug_id = debug_id; __entry->call_debug_id = call_debug_id; __entry->seq = seq; __entry->ref = ref; __entry->what = what; ), TP_printk("B=%08x c=%08x q=%08x %s r=%d", __entry->debug_id, __entry->call_debug_id, __entry->seq, __print_symbolic(__entry->what, rxrpc_txbuf_traces), __entry->ref) ); TRACE_EVENT(rxrpc_poke_call, TP_PROTO(struct rxrpc_call *call, bool busy, enum rxrpc_call_poke_trace what), TP_ARGS(call, busy, what), TP_STRUCT__entry( __field(unsigned int, call_debug_id) __field(bool, busy) __field(enum rxrpc_call_poke_trace, what) ), TP_fast_assign( __entry->call_debug_id = call->debug_id; __entry->busy = busy; __entry->what = what; ), TP_printk("c=%08x %s%s", __entry->call_debug_id, __print_symbolic(__entry->what, rxrpc_call_poke_traces), __entry->busy ? "!" : "") ); TRACE_EVENT(rxrpc_call_poked, TP_PROTO(struct rxrpc_call *call), TP_ARGS(call), TP_STRUCT__entry( __field(unsigned int, call_debug_id) ), TP_fast_assign( __entry->call_debug_id = call->debug_id; ), TP_printk("c=%08x", __entry->call_debug_id) ); TRACE_EVENT(rxrpc_sack, TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, unsigned int sack, enum rxrpc_sack_trace what), TP_ARGS(call, seq, sack, what), TP_STRUCT__entry( __field(unsigned int, call_debug_id) __field(rxrpc_seq_t, seq) __field(unsigned int, sack) __field(enum rxrpc_sack_trace, what) ), TP_fast_assign( __entry->call_debug_id = call->debug_id; __entry->seq = seq; __entry->sack = sack; __entry->what = what; ), TP_printk("c=%08x q=%08x %s k=%x", __entry->call_debug_id, __entry->seq, __print_symbolic(__entry->what, rxrpc_sack_traces), __entry->sack) ); #undef EM #undef E_ #endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */ #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ #include <trace/define_trace.h> |
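/*
 * Illustrative sketch (not part of this header): each TRACE_EVENT() above
 * expands into a trace_<name>() emitter and a trace_<name>_enabled() test
 * whose arguments follow its TP_PROTO(). A caller in net/rxrpc would fire
 * the rxrpc_poke_call event roughly as below; the wrapper function is a
 * hypothetical example, not existing rxrpc code.
 */
#if 0	/* example only, never compiled */
static void example_emit_poke(struct rxrpc_call *call, bool busy,
			      enum rxrpc_call_poke_trace what)
{
	if (trace_rxrpc_poke_call_enabled())
		trace_rxrpc_poke_call(call, busy, what);
	/* Renders as "c=<debug_id> <why>[!]" in the trace buffer. */
}
#endif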
1 1 21 4 3 3 31 1 23 1 1 20 1 57 4 2 54 57 1 1 2 2 12 17 1 18 5 12 1 1 1 1 6 35 1 4 1 19 10 33 31 1 2 1 28 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 | // SPDX-License-Identifier: GPL-2.0-only /* * Landlock LSM - System call implementations and user space interfaces * * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> * Copyright © 2018-2020 ANSSI */ #include <asm/current.h> #include <linux/anon_inodes.h> #include <linux/build_bug.h> #include <linux/capability.h> #include <linux/compiler_types.h> #include <linux/dcache.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/limits.h> #include <linux/mount.h> #include <linux/path.h> #include <linux/sched.h> #include <linux/security.h> #include <linux/stddef.h> #include <linux/syscalls.h> #include <linux/types.h> #include <linux/uaccess.h> #include <uapi/linux/landlock.h> #include "cred.h" #include "fs.h" #include "limits.h" #include "net.h" #include "ruleset.h" #include "setup.h" /** * copy_min_struct_from_user - Safe future-proof argument copying * * Extend copy_struct_from_user() to check for consistent user buffer. * * @dst: Kernel space pointer or NULL. * @ksize: Actual size of the data pointed to by @dst. * @ksize_min: Minimal required size to be copied. * @src: User space pointer or NULL. * @usize: (Alleged) size of the data pointed to by @src. */ static __always_inline int copy_min_struct_from_user(void *const dst, const size_t ksize, const size_t ksize_min, const void __user *const src, const size_t usize) { /* Checks buffer inconsistencies. */ BUILD_BUG_ON(!dst); if (!src) return -EFAULT; /* Checks size ranges. 
*/ BUILD_BUG_ON(ksize <= 0); BUILD_BUG_ON(ksize < ksize_min); if (usize < ksize_min) return -EINVAL; if (usize > PAGE_SIZE) return -E2BIG; /* Copies user buffer and fills with zeros. */ return copy_struct_from_user(dst, ksize, src, usize); } /* * This function only contains arithmetic operations with constants, leading to * BUILD_BUG_ON(). The related code is evaluated and checked at build time, * but it is then ignored thanks to compiler optimizations. */ static void build_check_abi(void) { struct landlock_ruleset_attr ruleset_attr; struct landlock_path_beneath_attr path_beneath_attr; struct landlock_net_port_attr net_port_attr; size_t ruleset_size, path_beneath_size, net_port_size; /* * For each user space ABI structures, first checks that there is no * hole in them, then checks that all architectures have the same * struct size. */ ruleset_size = sizeof(ruleset_attr.handled_access_fs); ruleset_size += sizeof(ruleset_attr.handled_access_net); BUILD_BUG_ON(sizeof(ruleset_attr) != ruleset_size); BUILD_BUG_ON(sizeof(ruleset_attr) != 16); path_beneath_size = sizeof(path_beneath_attr.allowed_access); path_beneath_size += sizeof(path_beneath_attr.parent_fd); BUILD_BUG_ON(sizeof(path_beneath_attr) != path_beneath_size); BUILD_BUG_ON(sizeof(path_beneath_attr) != 12); net_port_size = sizeof(net_port_attr.allowed_access); net_port_size += sizeof(net_port_attr.port); BUILD_BUG_ON(sizeof(net_port_attr) != net_port_size); BUILD_BUG_ON(sizeof(net_port_attr) != 16); } /* Ruleset handling */ static int fop_ruleset_release(struct inode *const inode, struct file *const filp) { struct landlock_ruleset *ruleset = filp->private_data; landlock_put_ruleset(ruleset); return 0; } static ssize_t fop_dummy_read(struct file *const filp, char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_READ. */ return -EINVAL; } static ssize_t fop_dummy_write(struct file *const filp, const char __user *const buf, const size_t size, loff_t *const ppos) { /* Dummy handler to enable FMODE_CAN_WRITE. */ return -EINVAL; } /* * A ruleset file descriptor enables to build a ruleset by adding (i.e. * writing) rule after rule, without relying on the task's context. This * reentrant design is also used in a read way to enforce the ruleset on the * current task. */ static const struct file_operations ruleset_fops = { .release = fop_ruleset_release, .read = fop_dummy_read, .write = fop_dummy_write, }; #define LANDLOCK_ABI_VERSION 4 /** * sys_landlock_create_ruleset - Create a new ruleset * * @attr: Pointer to a &struct landlock_ruleset_attr identifying the scope of * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. * * If @flags is %LANDLOCK_CREATE_RULESET_VERSION and @attr is NULL and @size is * 0, then the returned value is the highest supported Landlock ABI version * (starting at 1). * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: unknown @flags, or unknown access, or too small @size; * - %E2BIG or %EFAULT: @attr or @size inconsistencies; * - %ENOMSG: empty &landlock_ruleset_attr.handled_access_fs. 
*/ SYSCALL_DEFINE3(landlock_create_ruleset, const struct landlock_ruleset_attr __user *const, attr, const size_t, size, const __u32, flags) { struct landlock_ruleset_attr ruleset_attr; struct landlock_ruleset *ruleset; int err, ruleset_fd; /* Build-time checks. */ build_check_abi(); if (!landlock_initialized) return -EOPNOTSUPP; if (flags) { if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && !size) return LANDLOCK_ABI_VERSION; return -EINVAL; } /* Copies raw user space buffer. */ err = copy_min_struct_from_user(&ruleset_attr, sizeof(ruleset_attr), offsetofend(typeof(ruleset_attr), handled_access_fs), attr, size); if (err) return err; /* Checks content (and 32-bits cast). */ if ((ruleset_attr.handled_access_fs | LANDLOCK_MASK_ACCESS_FS) != LANDLOCK_MASK_ACCESS_FS) return -EINVAL; /* Checks network content (and 32-bits cast). */ if ((ruleset_attr.handled_access_net | LANDLOCK_MASK_ACCESS_NET) != LANDLOCK_MASK_ACCESS_NET) return -EINVAL; /* Checks arguments and transforms to kernel struct. */ ruleset = landlock_create_ruleset(ruleset_attr.handled_access_fs, ruleset_attr.handled_access_net); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* Creates anonymous FD referring to the ruleset. */ ruleset_fd = anon_inode_getfd("[landlock-ruleset]", &ruleset_fops, ruleset, O_RDWR | O_CLOEXEC); if (ruleset_fd < 0) landlock_put_ruleset(ruleset); return ruleset_fd; } /* * Returns an owned ruleset from a FD. It is thus needed to call * landlock_put_ruleset() on the return value. */ static struct landlock_ruleset *get_ruleset_from_fd(const int fd, const fmode_t mode) { struct fd ruleset_f; struct landlock_ruleset *ruleset; ruleset_f = fdget(fd); if (!ruleset_f.file) return ERR_PTR(-EBADF); /* Checks FD type and access right. */ if (ruleset_f.file->f_op != &ruleset_fops) { ruleset = ERR_PTR(-EBADFD); goto out_fdput; } if (!(ruleset_f.file->f_mode & mode)) { ruleset = ERR_PTR(-EPERM); goto out_fdput; } ruleset = ruleset_f.file->private_data; if (WARN_ON_ONCE(ruleset->num_layers != 1)) { ruleset = ERR_PTR(-EINVAL); goto out_fdput; } landlock_get_ruleset(ruleset); out_fdput: fdput(ruleset_f); return ruleset; } /* Path handling */ /* * @path: Must call put_path(@path) after the call if it succeeded. */ static int get_path_from_fd(const s32 fd, struct path *const path) { struct fd f; int err = 0; BUILD_BUG_ON(!__same_type( fd, ((struct landlock_path_beneath_attr *)NULL)->parent_fd)); /* Handles O_PATH. */ f = fdget_raw(fd); if (!f.file) return -EBADF; /* * Forbids ruleset FDs, internal filesystems (e.g. nsfs), including * pseudo filesystems that will never be mountable (e.g. sockfs, * pipefs). */ if ((f.file->f_op == &ruleset_fops) || (f.file->f_path.mnt->mnt_flags & MNT_INTERNAL) || (f.file->f_path.dentry->d_sb->s_flags & SB_NOUSER) || d_is_negative(f.file->f_path.dentry) || IS_PRIVATE(d_backing_inode(f.file->f_path.dentry))) { err = -EBADFD; goto out_fdput; } *path = f.file->f_path; path_get(path); out_fdput: fdput(f); return err; } static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, const void __user *const rule_attr) { struct landlock_path_beneath_attr path_beneath_attr; struct path path; int res, err; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&path_beneath_attr, rule_attr, sizeof(path_beneath_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored in path walks. */ if (!path_beneath_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. 
*/ mask = landlock_get_raw_fs_access_mask(ruleset, 0); if ((path_beneath_attr.allowed_access | mask) != mask) return -EINVAL; /* Gets and checks the new rule. */ err = get_path_from_fd(path_beneath_attr.parent_fd, &path); if (err) return err; /* Imports the new rule. */ err = landlock_append_fs_rule(ruleset, &path, path_beneath_attr.allowed_access); path_put(&path); return err; } static int add_rule_net_port(struct landlock_ruleset *ruleset, const void __user *const rule_attr) { struct landlock_net_port_attr net_port_attr; int res; access_mask_t mask; /* Copies raw user space buffer. */ res = copy_from_user(&net_port_attr, rule_attr, sizeof(net_port_attr)); if (res) return -EFAULT; /* * Informs about useless rule: empty allowed_access (i.e. deny rules) * are ignored by network actions. */ if (!net_port_attr.allowed_access) return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. */ mask = landlock_get_net_access_mask(ruleset, 0); if ((net_port_attr.allowed_access | mask) != mask) return -EINVAL; /* Denies inserting a rule with port greater than 65535. */ if (net_port_attr.port > U16_MAX) return -EINVAL; /* Imports the new rule. */ return landlock_append_net_rule(ruleset, net_port_attr.port, net_port_attr.allowed_access); } /** * sys_landlock_add_rule - Add a new rule to a ruleset * * @ruleset_fd: File descriptor tied to the ruleset that should be extended * with the new rule. * @rule_type: Identify the structure type pointed to by @rule_attr: * %LANDLOCK_RULE_PATH_BENEATH or %LANDLOCK_RULE_NET_PORT. * @rule_attr: Pointer to a rule (only of type &struct * landlock_path_beneath_attr for now). * @flags: Must be 0. * * This system call enables to define a new rule and add it to an existing * ruleset. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EAFNOSUPPORT: @rule_type is %LANDLOCK_RULE_NET_PORT but TCP/IP is not * supported by the running kernel; * - %EINVAL: @flags is not 0, or inconsistent access in the rule (i.e. * &landlock_path_beneath_attr.allowed_access or * &landlock_net_port_attr.allowed_access is not a subset of the * ruleset handled accesses), or &landlock_net_port_attr.port is * greater than 65535; * - %ENOMSG: Empty accesses (e.g. &landlock_path_beneath_attr.allowed_access); * - %EBADF: @ruleset_fd is not a file descriptor for the current thread, or a * member of @rule_attr is not a file descriptor as expected; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor, or a member of * @rule_attr is not the expected file descriptor type; * - %EPERM: @ruleset_fd has no write access to the underlying ruleset; * - %EFAULT: @rule_attr inconsistency. */ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd, const enum landlock_rule_type, rule_type, const void __user *const, rule_attr, const __u32, flags) { struct landlock_ruleset *ruleset; int err; if (!landlock_initialized) return -EOPNOTSUPP; /* No flag for now. */ if (flags) return -EINVAL; /* Gets and checks the ruleset. 
*/ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_WRITE); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); switch (rule_type) { case LANDLOCK_RULE_PATH_BENEATH: err = add_rule_path_beneath(ruleset, rule_attr); break; case LANDLOCK_RULE_NET_PORT: err = add_rule_net_port(ruleset, rule_attr); break; default: err = -EINVAL; break; } landlock_put_ruleset(ruleset); return err; } /* Enforcement */ /** * sys_landlock_restrict_self - Enforce a ruleset on the calling thread * * @ruleset_fd: File descriptor tied to the ruleset to merge with the target. * @flags: Must be 0. * * This system call enables to enforce a Landlock ruleset on the current * thread. Enforcing a ruleset requires that the task has %CAP_SYS_ADMIN in its * namespace or is running with no_new_privs. This avoids scenarios where * unprivileged tasks can affect the behavior of privileged children. * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; * - %EINVAL: @flags is not 0. * - %EBADF: @ruleset_fd is not a file descriptor for the current thread; * - %EBADFD: @ruleset_fd is not a ruleset file descriptor; * - %EPERM: @ruleset_fd has no read access to the underlying ruleset, or the * current thread is not running with no_new_privs, or it doesn't have * %CAP_SYS_ADMIN in its namespace. * - %E2BIG: The maximum number of stacked rulesets is reached for the current * thread. */ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32, flags) { struct landlock_ruleset *new_dom, *ruleset; struct cred *new_cred; struct landlock_cred_security *new_llcred; int err; if (!landlock_initialized) return -EOPNOTSUPP; /* * Similar checks as for seccomp(2), except that an -EPERM may be * returned. */ if (!task_no_new_privs(current) && !ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN)) return -EPERM; /* No flag for now. */ if (flags) return -EINVAL; /* Gets and checks the ruleset. */ ruleset = get_ruleset_from_fd(ruleset_fd, FMODE_CAN_READ); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); /* Prepares new credentials. */ new_cred = prepare_creds(); if (!new_cred) { err = -ENOMEM; goto out_put_ruleset; } new_llcred = landlock_cred(new_cred); /* * There is no possible race condition while copying and manipulating * the current credentials because they are dedicated per thread. */ new_dom = landlock_merge_ruleset(new_llcred->domain, ruleset); if (IS_ERR(new_dom)) { err = PTR_ERR(new_dom); goto out_put_creds; } /* Replaces the old (prepared) domain. */ landlock_put_ruleset(new_llcred->domain); new_llcred->domain = new_dom; landlock_put_ruleset(ruleset); return commit_creds(new_cred); out_put_creds: abort_creds(new_cred); out_put_ruleset: landlock_put_ruleset(ruleset); return err; } |
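/*
 * Illustrative user-space sketch (not kernel code): the intended calling
 * sequence for the three syscalls implemented above, following the Landlock
 * documentation. The sandbox policy ("/etc" readable, nothing else writable
 * or readable for regular files) is an arbitrary example choice, and error
 * paths simply return -1 without cleaning up descriptors, for brevity.
 */
#if 0	/* example only, never compiled into the kernel */
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_restrict_self(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_WRITE_FILE,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
	};
	int ruleset_fd;

	/* 1. Create a ruleset handling read/write accesses to regular files. */
	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	if (ruleset_fd < 0)
		return -1;

	/* 2. Add a rule allowing file reads beneath /etc. */
	path_beneath.parent_fd = open("/etc", O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0 ||
	    syscall(__NR_landlock_add_rule, ruleset_fd,
		    LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0))
		return -1;
	close(path_beneath.parent_fd);

	/* 3. Enforce; no_new_privs is required without CAP_SYS_ADMIN. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    syscall(__NR_landlock_restrict_self, ruleset_fd, 0))
		return -1;
	close(ruleset_fd);
	return 0;
}
#endif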
2 2 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 | // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <media/frame_vector.h> /** * get_vaddr_frames() - map virtual addresses to pfns * @start: starting user address * @nr_frames: number of pages / pfns from start to map * @write: the mapped address has write permission * @vec: structure which receives pages / pfns of the addresses mapped. * It should have space for at least nr_frames entries. * * This function maps virtual addresses from @start and fills @vec structure * with page frame numbers or page pointers to corresponding pages (choice * depends on the type of the vma underlying the virtual address). If @start * belongs to a normal vma, the function grabs reference to each of the pages * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't * touch page structures and the caller must make sure pfns aren't reused for * anything else while he is using them. * * The function returns number of pages mapped which may be less than * @nr_frames. In particular we stop mapping if there are more vmas of * different type underlying the specified range of virtual addresses. * When the function isn't able to map a single page, it returns error. * * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used * to be able to do that, but that could (racily) return non-refcounted * pfns. * * This function takes care of grabbing mmap_lock as necessary. */ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write, struct frame_vector *vec) { int ret; unsigned int gup_flags = FOLL_LONGTERM; if (nr_frames == 0) return 0; if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) nr_frames = vec->nr_allocated; start = untagged_addr(start); if (write) gup_flags |= FOLL_WRITE; ret = pin_user_pages_fast(start, nr_frames, gup_flags, (struct page **)(vec->ptrs)); vec->got_ref = true; vec->is_pfns = false; vec->nr_frames = ret; if (likely(ret > 0)) return ret; vec->nr_frames = 0; return ret ? ret : -EFAULT; } EXPORT_SYMBOL(get_vaddr_frames); /** * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired * them * @vec: frame vector to put * * Drop references to pages if get_vaddr_frames() acquired them. We also * invalidate the frame vector so that it is prepared for the next call into * get_vaddr_frames(). */ void put_vaddr_frames(struct frame_vector *vec) { struct page **pages; if (!vec->got_ref) goto out; pages = frame_vector_pages(vec); /* * frame_vector_pages() might needed to do a conversion when * get_vaddr_frames() got pages but vec was later converted to pfns. * But it shouldn't really fail to convert pfns back... 
*/ if (WARN_ON(IS_ERR(pages))) goto out; unpin_user_pages(pages, vec->nr_frames); vec->got_ref = false; out: vec->nr_frames = 0; } EXPORT_SYMBOL(put_vaddr_frames); /** * frame_vector_to_pages - convert frame vector to contain page pointers * @vec: frame vector to convert * * Convert @vec to contain array of page pointers. If the conversion is * successful, return 0. Otherwise return an error. Note that we do not grab * page references for the page structures. */ int frame_vector_to_pages(struct frame_vector *vec) { int i; unsigned long *nums; struct page **pages; if (!vec->is_pfns) return 0; nums = frame_vector_pfns(vec); for (i = 0; i < vec->nr_frames; i++) if (!pfn_valid(nums[i])) return -EINVAL; pages = (struct page **)nums; for (i = 0; i < vec->nr_frames; i++) pages[i] = pfn_to_page(nums[i]); vec->is_pfns = false; return 0; } EXPORT_SYMBOL(frame_vector_to_pages); /** * frame_vector_to_pfns - convert frame vector to contain pfns * @vec: frame vector to convert * * Convert @vec to contain array of pfns. */ void frame_vector_to_pfns(struct frame_vector *vec) { int i; unsigned long *nums; struct page **pages; if (vec->is_pfns) return; pages = (struct page **)(vec->ptrs); nums = (unsigned long *)pages; for (i = 0; i < vec->nr_frames; i++) nums[i] = page_to_pfn(pages[i]); vec->is_pfns = true; } EXPORT_SYMBOL(frame_vector_to_pfns); /** * frame_vector_create() - allocate & initialize structure for pinned pfns * @nr_frames: number of pfns slots we should reserve * * Allocate and initialize struct pinned_pfns to be able to hold @nr_pfns * pfns. */ struct frame_vector *frame_vector_create(unsigned int nr_frames) { struct frame_vector *vec; int size = struct_size(vec, ptrs, nr_frames); if (WARN_ON_ONCE(nr_frames == 0)) return NULL; /* * This is absurdly high. It's here just to avoid strange effects when * arithmetics overflows. */ if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2)) return NULL; /* * Avoid higher order allocations, use vmalloc instead. It should * be rare anyway. */ vec = kvmalloc(size, GFP_KERNEL); if (!vec) return NULL; vec->nr_allocated = nr_frames; vec->nr_frames = 0; return vec; } EXPORT_SYMBOL(frame_vector_create); /** * frame_vector_destroy() - free memory allocated to carry frame vector * @vec: Frame vector to free * * Free structure allocated by frame_vector_create() to carry frames. */ void frame_vector_destroy(struct frame_vector *vec) { /* Make sure put_vaddr_frames() got called properly... */ VM_BUG_ON(vec->nr_frames > 0); kvfree(vec); } EXPORT_SYMBOL(frame_vector_destroy); |
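/*
 * Illustrative sketch of the expected calling pattern for the helpers above
 * (modelled on the media/videobuf2 usage). The pinned range, the write flag
 * and the surrounding driver context are placeholders.
 */
#if 0	/* example only */
static int example_pin_user_range(unsigned long start, unsigned int n_pages)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(n_pages);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(start, n_pages, true, vec);
	if (ret < 0)
		goto out_destroy;

	pages = frame_vector_pages(vec);	/* valid while the refs are held */
	/* ... hand pages[0..ret-1] to the device and do the I/O ... */

	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret < 0 ? ret : 0;
}
#endif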
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 | /* SPDX-License-Identifier: GPL-2.0 */ /* * DFS referral cache routines * * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de> */ #ifndef _CIFS_DFS_CACHE_H #define _CIFS_DFS_CACHE_H #include <linux/nls.h> #include <linux/list.h> #include <linux/uuid.h> #include "cifsglob.h" extern struct workqueue_struct *dfscache_wq; extern atomic_t dfs_cache_ttl; #define DFS_CACHE_TGT_LIST_INIT(var) \ { .tl_numtgts = 0, .tl_list = LIST_HEAD_INIT((var).tl_list), } #define DFS_CACHE_TGT_LIST(var) \ struct dfs_cache_tgt_list var = DFS_CACHE_TGT_LIST_INIT(var) struct dfs_cache_tgt_list { int tl_numtgts; struct list_head tl_list; }; struct dfs_cache_tgt_iterator { char *it_name; int it_path_consumed; struct list_head it_list; }; int dfs_cache_init(void); void dfs_cache_destroy(void); extern const struct proc_ops dfscache_proc_ops; int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp, int remap, const char *path, struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tgt_list); int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tgt_list); void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it); int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it, struct dfs_info3_param *ref); int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share, char **prefix); char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap); int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb); void dfs_cache_refresh(struct work_struct *work); static inline struct dfs_cache_tgt_iterator * dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl, struct dfs_cache_tgt_iterator *it) { if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list) || !it || list_is_last(&it->it_list, &tl->tl_list)) return NULL; return list_next_entry(it, it_list); } static inline struct dfs_cache_tgt_iterator * dfs_cache_get_tgt_iterator(struct dfs_cache_tgt_list *tl) { if (!tl) return NULL; return list_first_entry_or_null(&tl->tl_list, struct dfs_cache_tgt_iterator, it_list); } static inline void dfs_cache_free_tgts(struct dfs_cache_tgt_list *tl) { struct dfs_cache_tgt_iterator *it, *nit; if (!tl || !tl->tl_numtgts || list_empty(&tl->tl_list)) return; list_for_each_entry_safe(it, nit, &tl->tl_list, it_list) { list_del(&it->it_list); kfree(it->it_name); kfree(it); } tl->tl_numtgts = 0; } static inline const char * dfs_cache_get_tgt_name(const struct dfs_cache_tgt_iterator *it) { return it ? it->it_name : NULL; } static inline int dfs_cache_get_nr_tgts(const struct dfs_cache_tgt_list *tl) { return tl ? tl->tl_numtgts : 0; } static inline int dfs_cache_get_ttl(void) { return atomic_read(&dfs_cache_ttl); } #endif /* _CIFS_DFS_CACHE_H */ |
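/*
 * Illustrative sketch (not part of this header): walking a referral's target
 * list with the iterator helpers declared above. The xid/ses/cp/remap/path
 * arguments are caller-supplied placeholders, and passing NULL for the
 * dfs_info3_param output is assumed to be acceptable when only the target
 * list is needed.
 */
#if 0	/* example only */
static void example_walk_targets(const unsigned int xid, struct cifs_ses *ses,
				 const struct nls_table *cp, int remap,
				 const char *path)
{
	DFS_CACHE_TGT_LIST(tl);
	struct dfs_cache_tgt_iterator *it;

	if (dfs_cache_find(xid, ses, cp, remap, path, NULL, &tl))
		return;

	for (it = dfs_cache_get_tgt_iterator(&tl); it;
	     it = dfs_cache_get_next_tgt(&tl, it))
		pr_debug("referral target: %s\n", dfs_cache_get_tgt_name(it));

	dfs_cache_free_tgts(&tl);
}
#endif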
71 71 2 2 2 9 2 2 9 7 7 6 35 35 5 5 5 5 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 | // SPDX-License-Identifier: GPL-2.0-only /* * Common helpers for stackable filesystems and backing files. * * Forked from fs/overlayfs/file.c. * * Copyright (C) 2017 Red Hat, Inc. * Copyright (C) 2023 CTERA Networks. */ #include <linux/fs.h> #include <linux/backing-file.h> #include <linux/splice.h> #include <linux/mm.h> #include "internal.h" /** * backing_file_open - open a backing file for kernel internal use * @user_path: path that the user reuqested to open * @flags: open flags * @real_path: path of the backing file * @cred: credentials for open * * Open a backing file for a stackable filesystem (e.g., overlayfs). * @user_path may be on the stackable filesystem and @real_path on the * underlying filesystem. In this case, we want to be able to return the * @user_path of the stackable filesystem. This is done by embedding the * returned file into a container structure that also stores the stacked * file's path, which can be retrieved using backing_file_user_path(). 
*/ struct file *backing_file_open(const struct path *user_path, int flags, const struct path *real_path, const struct cred *cred) { struct file *f; int error; f = alloc_empty_backing_file(flags, cred); if (IS_ERR(f)) return f; path_get(user_path); *backing_file_user_path(f) = *user_path; error = vfs_open(real_path, f); if (error) { fput(f); f = ERR_PTR(error); } return f; } EXPORT_SYMBOL_GPL(backing_file_open); struct backing_aio { struct kiocb iocb; refcount_t ref; struct kiocb *orig_iocb; /* used for aio completion */ void (*end_write)(struct file *); struct work_struct work; long res; }; static struct kmem_cache *backing_aio_cachep; #define BACKING_IOCB_MASK \ (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND) static rwf_t iocb_to_rw_flags(int flags) { return (__force rwf_t)(flags & BACKING_IOCB_MASK); } static void backing_aio_put(struct backing_aio *aio) { if (refcount_dec_and_test(&aio->ref)) { fput(aio->iocb.ki_filp); kmem_cache_free(backing_aio_cachep, aio); } } static void backing_aio_cleanup(struct backing_aio *aio, long res) { struct kiocb *iocb = &aio->iocb; struct kiocb *orig_iocb = aio->orig_iocb; if (aio->end_write) aio->end_write(orig_iocb->ki_filp); orig_iocb->ki_pos = iocb->ki_pos; backing_aio_put(aio); } static void backing_aio_rw_complete(struct kiocb *iocb, long res) { struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb); struct kiocb *orig_iocb = aio->orig_iocb; if (iocb->ki_flags & IOCB_WRITE) kiocb_end_write(iocb); backing_aio_cleanup(aio, res); orig_iocb->ki_complete(orig_iocb, res); } static void backing_aio_complete_work(struct work_struct *work) { struct backing_aio *aio = container_of(work, struct backing_aio, work); backing_aio_rw_complete(&aio->iocb, aio->res); } static void backing_aio_queue_completion(struct kiocb *iocb, long res) { struct backing_aio *aio = container_of(iocb, struct backing_aio, iocb); /* * Punt to a work queue to serialize updates of mtime/size. 
*/ aio->res = res; INIT_WORK(&aio->work, backing_aio_complete_work); queue_work(file_inode(aio->orig_iocb->ki_filp)->i_sb->s_dio_done_wq, &aio->work); } static int backing_aio_init_wq(struct kiocb *iocb) { struct super_block *sb = file_inode(iocb->ki_filp)->i_sb; if (sb->s_dio_done_wq) return 0; return sb_init_dio_done_wq(sb); } ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx) { struct backing_aio *aio = NULL; const struct cred *old_cred; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING))) return -EIO; if (!iov_iter_count(iter)) return 0; if (iocb->ki_flags & IOCB_DIRECT && !(file->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; old_cred = override_creds(ctx->cred); if (is_sync_kiocb(iocb)) { rwf_t rwf = iocb_to_rw_flags(flags); ret = vfs_iter_read(file, iter, &iocb->ki_pos, rwf); } else { ret = -ENOMEM; aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL); if (!aio) goto out; aio->orig_iocb = iocb; kiocb_clone(&aio->iocb, iocb, get_file(file)); aio->iocb.ki_complete = backing_aio_rw_complete; refcount_set(&aio->ref, 2); ret = vfs_iocb_iter_read(file, &aio->iocb, iter); backing_aio_put(aio); if (ret != -EIOCBQUEUED) backing_aio_cleanup(aio, ret); } out: revert_creds(old_cred); if (ctx->accessed) ctx->accessed(ctx->user_file); return ret; } EXPORT_SYMBOL_GPL(backing_file_read_iter); ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx) { const struct cred *old_cred; ssize_t ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING))) return -EIO; if (!iov_iter_count(iter)) return 0; ret = file_remove_privs(ctx->user_file); if (ret) return ret; if (iocb->ki_flags & IOCB_DIRECT && !(file->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; /* * Stacked filesystems don't support deferred completions, don't copy * this property in case it is set by the issuer. 
*/ flags &= ~IOCB_DIO_CALLER_COMP; old_cred = override_creds(ctx->cred); if (is_sync_kiocb(iocb)) { rwf_t rwf = iocb_to_rw_flags(flags); ret = vfs_iter_write(file, iter, &iocb->ki_pos, rwf); if (ctx->end_write) ctx->end_write(ctx->user_file); } else { struct backing_aio *aio; ret = backing_aio_init_wq(iocb); if (ret) goto out; ret = -ENOMEM; aio = kmem_cache_zalloc(backing_aio_cachep, GFP_KERNEL); if (!aio) goto out; aio->orig_iocb = iocb; aio->end_write = ctx->end_write; kiocb_clone(&aio->iocb, iocb, get_file(file)); aio->iocb.ki_flags = flags; aio->iocb.ki_complete = backing_aio_queue_completion; refcount_set(&aio->ref, 2); ret = vfs_iocb_iter_write(file, &aio->iocb, iter); backing_aio_put(aio); if (ret != -EIOCBQUEUED) backing_aio_cleanup(aio, ret); } out: revert_creds(old_cred); return ret; } EXPORT_SYMBOL_GPL(backing_file_write_iter); ssize_t backing_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags, struct backing_file_ctx *ctx) { const struct cred *old_cred; ssize_t ret; if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING))) return -EIO; old_cred = override_creds(ctx->cred); ret = vfs_splice_read(in, ppos, pipe, len, flags); revert_creds(old_cred); if (ctx->accessed) ctx->accessed(ctx->user_file); return ret; } EXPORT_SYMBOL_GPL(backing_file_splice_read); ssize_t backing_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags, struct backing_file_ctx *ctx) { const struct cred *old_cred; ssize_t ret; if (WARN_ON_ONCE(!(out->f_mode & FMODE_BACKING))) return -EIO; ret = file_remove_privs(ctx->user_file); if (ret) return ret; old_cred = override_creds(ctx->cred); file_start_write(out); ret = iter_file_splice_write(pipe, out, ppos, len, flags); file_end_write(out); revert_creds(old_cred); if (ctx->end_write) ctx->end_write(ctx->user_file); return ret; } EXPORT_SYMBOL_GPL(backing_file_splice_write); int backing_file_mmap(struct file *file, struct vm_area_struct *vma, struct backing_file_ctx *ctx) { const struct cred *old_cred; int ret; if (WARN_ON_ONCE(!(file->f_mode & FMODE_BACKING)) || WARN_ON_ONCE(ctx->user_file != vma->vm_file)) return -EIO; if (!file->f_op->mmap) return -ENODEV; vma_set_file(vma, file); old_cred = override_creds(ctx->cred); ret = call_mmap(vma->vm_file, vma); revert_creds(old_cred); if (ctx->accessed) ctx->accessed(ctx->user_file); return ret; } EXPORT_SYMBOL_GPL(backing_file_mmap); static int __init backing_aio_init(void) { backing_aio_cachep = kmem_cache_create("backing_aio", sizeof(struct backing_aio), 0, SLAB_HWCACHE_ALIGN, NULL); if (!backing_aio_cachep) return -ENOMEM; return 0; } fs_initcall(backing_aio_init); |
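/*
 * Illustrative sketch of a stacking filesystem delegating ->read_iter to its
 * backing file through the helper above (patterned after overlayfs).
 * example_real_file() is a hypothetical accessor for the backing struct file,
 * and current_cred() stands in for whatever credentials the filesystem
 * captured at open time.
 */
#if 0	/* example only */
static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct file *realfile = example_real_file(file);	/* hypothetical */
	struct backing_file_ctx ctx = {
		.cred = current_cred(),		/* placeholder credentials */
		.user_file = file,
	};

	return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags,
				      &ctx);
}
#endif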
1 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 | /* * linux/fs/nls/mac-centeuro.c * * Charset maccenteuro translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ /* * COPYRIGHT AND PERMISSION NOTICE * * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under * the Terms of Use in http://www.unicode.org/copyright.html. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of the Unicode data files and any associated documentation (the "Data * Files") or Unicode software and any associated documentation (the * "Software") to deal in the Data Files or Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Data Files or Software, and * to permit persons to whom the Data Files or Software are furnished to do * so, provided that (a) the above copyright notice(s) and this permission * notice appear with all copies of the Data Files or Software, (b) both the * above copyright notice(s) and this permission notice appear in associated * documentation, and (c) there is clear notice in each modified Data File or * in the Software as well as in the documentation associated with the Data * File(s) or Software that the data or software has been modified. 
* * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THE DATA FILES OR SOFTWARE. * * Except as contained in this notice, the name of a copyright holder shall * not be used in advertising or otherwise to promote the sale, use or other * dealings in these Data Files or Software without prior written * authorization of the copyright holder. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80 */ 0x00c4, 0x0100, 0x0101, 0x00c9, 0x0104, 0x00d6, 0x00dc, 0x00e1, 0x0105, 0x010c, 0x00e4, 0x010d, 0x0106, 0x0107, 0x00e9, 0x0179, /* 0x90 */ 0x017a, 0x010e, 0x00ed, 0x010f, 0x0112, 0x0113, 0x0116, 0x00f3, 0x0117, 0x00f4, 0x00f6, 0x00f5, 0x00fa, 0x011a, 0x011b, 0x00fc, /* 0xa0 */ 0x2020, 0x00b0, 0x0118, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df, 0x00ae, 0x00a9, 0x2122, 0x0119, 0x00a8, 0x2260, 0x0123, 0x012e, /* 0xb0 */ 0x012f, 0x012a, 0x2264, 0x2265, 0x012b, 0x0136, 0x2202, 0x2211, 0x0142, 0x013b, 0x013c, 0x013d, 0x013e, 0x0139, 0x013a, 0x0145, /* 0xc0 */ 0x0146, 0x0143, 0x00ac, 0x221a, 0x0144, 0x0147, 0x2206, 0x00ab, 0x00bb, 0x2026, 0x00a0, 0x0148, 0x0150, 0x00d5, 0x0151, 0x014c, /* 0xd0 */ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca, 0x014d, 0x0154, 0x0155, 0x0158, 0x2039, 0x203a, 0x0159, 0x0156, /* 0xe0 */ 0x0157, 0x0160, 0x201a, 0x201e, 0x0161, 0x015a, 0x015b, 0x00c1, 0x0164, 0x0165, 0x00cd, 0x017d, 0x017e, 0x016a, 0x00d3, 0x00d4, /* 0xf0 */ 0x016b, 0x016e, 0x00da, 0x016f, 0x0170, 0x0171, 0x0172, 0x0173, 0x00dd, 0x00fd, 0x0137, 0x017b, 0x0141, 0x017c, 0x0122, 0x02c7, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 
0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */ 0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */ 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */ 0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */ 0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */ 0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */ 0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */ 0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */ 0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */ 0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */ 0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */ 0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */ 0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page25[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, page22, NULL, NULL, page25, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "maccenteuro", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_maccenteuro(void) { return register_nls(&table); } static void __exit exit_nls_maccenteuro(void) { unregister_nls(&table); } module_init(init_nls_maccenteuro) module_exit(exit_nls_maccenteuro) MODULE_LICENSE("Dual BSD/GPL"); |
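/*
 * Illustrative sketch (not part of nls_maccenteuro above): the same
 * two-level lookup that uni2char() performs, reduced to a standalone
 * helper so the page-table indexing is easy to follow.  The demo_*
 * names and the single demo page below are hypothetical stand-ins for
 * page00..page25 and page_uni2charset.
 */
#include <errno.h>

static const unsigned char demo_page00[256] = {
	[0x41] = 0x41,	/* example row: U+0041 maps to byte 0x41 */
};
static const unsigned char *const demo_uni2charset[256] = {
	demo_page00,	/* rows 0x01..0xff left NULL: no mapping */
};

/* Map a 16-bit code point to one output byte, as uni2char() does. */
static int demo_uni2char(unsigned short uni, unsigned char *out, int boundlen)
{
	const unsigned char *page = demo_uni2charset[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (boundlen <= 0)
		return -ENAMETOOLONG;	/* caller's output buffer is full */
	if (!page || !page[cl])
		return -EINVAL;		/* code point has no byte in this charset */
	out[0] = page[cl];
	return 1;			/* exactly one byte produced */
}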
135 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_LOCAL_H #define _ASM_X86_LOCAL_H #include <linux/percpu.h> #include <linux/atomic.h> #include <asm/asm.h> typedef struct { atomic_long_t a; } local_t; #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) #define local_set(l, i) atomic_long_set(&(l)->a, (i)) static inline void local_inc(local_t *l) { asm volatile(_ASM_INC "%0" : "+m" (l->a.counter)); } static inline void local_dec(local_t *l) { asm volatile(_ASM_DEC "%0" : "+m" (l->a.counter)); } static inline void local_add(long i, local_t *l) { asm volatile(_ASM_ADD "%1,%0" : "+m" (l->a.counter) : "ir" (i)); } static inline void local_sub(long i, local_t *l) { asm volatile(_ASM_SUB "%1,%0" : "+m" (l->a.counter) : "ir" (i)); } /** * local_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @l: pointer to type local_t * * Atomically subtracts @i from @l and returns * true if the result is zero, or false for all * other cases. */ static inline bool local_sub_and_test(long i, local_t *l) { return GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, e, "er", i); } /** * local_dec_and_test - decrement and test * @l: pointer to type local_t * * Atomically decrements @l by 1 and * returns true if the result is 0, or false for all other * cases. */ static inline bool local_dec_and_test(local_t *l) { return GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, e); } /** * local_inc_and_test - increment and test * @l: pointer to type local_t * * Atomically increments @l by 1 * and returns true if the result is zero, or false for all * other cases. */ static inline bool local_inc_and_test(local_t *l) { return GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, e); } /** * local_add_negative - add and test if negative * @i: integer value to add * @l: pointer to type local_t * * Atomically adds @i to @l and returns true * if the result is negative, or false when * result is greater than or equal to zero. 
*/ static inline bool local_add_negative(long i, local_t *l) { return GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, s, "er", i); } /** * local_add_return - add and return * @i: integer value to add * @l: pointer to type local_t * * Atomically adds @i to @l and returns @i + @l */ static inline long local_add_return(long i, local_t *l) { long __i = i; asm volatile(_ASM_XADD "%0, %1;" : "+r" (i), "+m" (l->a.counter) : : "memory"); return i + __i; } static inline long local_sub_return(long i, local_t *l) { return local_add_return(-i, l); } #define local_inc_return(l) (local_add_return(1, l)) #define local_dec_return(l) (local_sub_return(1, l)) static inline long local_cmpxchg(local_t *l, long old, long new) { return cmpxchg_local(&l->a.counter, old, new); } static inline bool local_try_cmpxchg(local_t *l, long *old, long new) { return try_cmpxchg_local(&l->a.counter, (typeof(l->a.counter) *) old, new); } /* Always has a lock prefix */ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) /** * local_add_unless - add unless the number is already a given value * @l: pointer of type local_t * @a: the amount to add to l... * @u: ...unless l is equal to u. * * Atomically adds @a to @l, if @v was not already @u. * Returns true if the addition was done. */ static __always_inline bool local_add_unless(local_t *l, long a, long u) { long c = local_read(l); do { if (unlikely(c == u)) return false; } while (!local_try_cmpxchg(l, &c, c + a)); return true; } #define local_inc_not_zero(l) local_add_unless((l), 1, 0) /* On x86_32, these are no better than the atomic variants. * On x86-64 these are better than the atomic variants on SMP kernels * because they dont use a lock prefix. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i, l) local_add((i), (l)) #define __local_sub(i, l) local_sub((i), (l)) #endif /* _ASM_X86_LOCAL_H */ |
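/*
 * Minimal usage sketch (not part of the header above): local_t is meant
 * for counters that are only modified by the CPU that owns them, which
 * is why the x86 implementation can avoid the LOCK prefix.  The per-CPU
 * variable and both helpers below are hypothetical.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, demo_events);

/* Called with preemption disabled (e.g. from IRQ or softirq context). */
static void demo_count_event(void)
{
	local_inc(this_cpu_ptr(&demo_events));
}

/* Summing other CPUs' counters is racy but good enough for statistics. */
static long demo_total_events(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(per_cpu_ptr(&demo_events, cpu));
	return sum;
}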
1 1 1 1 3 2 43 4 41 53 53 52 52 48 48 44 48 2 42 1 6 43 43 6 37 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 | // SPDX-License-Identifier: GPL-2.0+ #include <linux/dma-fence.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> #include "vkms_drv.h" static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) { struct vkms_output *output = container_of(timer, struct vkms_output, vblank_hrtimer); struct drm_crtc *crtc = &output->crtc; struct vkms_crtc_state *state; u64 ret_overrun; bool ret, fence_cookie; fence_cookie = dma_fence_begin_signalling(); ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns); if (ret_overrun != 1) pr_warn("%s: vblank timer overrun\n", __func__); spin_lock(&output->lock); ret = drm_crtc_handle_vblank(crtc); if (!ret) DRM_ERROR("vkms failure on handling vblank"); state = output->composer_state; spin_unlock(&output->lock); if (state && output->composer_enabled) { u64 frame = drm_crtc_accurate_vblank_count(crtc); /* update frame_start only if a queued vkms_composer_worker() * has read the data */ spin_lock(&output->composer_lock); if (!state->crc_pending) state->frame_start = frame; else DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n", state->frame_start, frame); state->frame_end = frame; state->crc_pending = true; spin_unlock(&output->composer_lock); ret = queue_work(output->composer_workq, &state->composer_work); if (!ret) DRM_DEBUG_DRIVER("Composer worker already queued\n"); } dma_fence_end_signalling(fence_cookie); return HRTIMER_RESTART; } static int vkms_enable_vblank(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct vkms_output *out = drm_crtc_to_vkms_output(crtc); drm_calc_timestamping_constants(crtc, &crtc->mode); hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); out->vblank_hrtimer.function = &vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL); return 0; } static void vkms_disable_vblank(struct drm_crtc *crtc) { struct vkms_output *out = drm_crtc_to_vkms_output(crtc); hrtimer_cancel(&out->vblank_hrtimer); } static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time, bool in_vblank_irq) { struct drm_device *dev 
= crtc->dev; unsigned int pipe = crtc->index; struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); struct vkms_output *output = &vkmsdev->output; struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; if (!READ_ONCE(vblank->enabled)) { *vblank_time = ktime_get(); return true; } *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); if (WARN_ON(*vblank_time == vblank->time)) return true; /* * To prevent races we roll the hrtimer forward before we do any * interrupt processing - this is how real hw works (the interrupt is * only generated after all the vblank registers are updated) and what * the vblank core expects. Therefore we need to always correct the * timestampe by one frame. */ *vblank_time -= output->period_ns; return true; } static struct drm_crtc_state * vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc) { struct vkms_crtc_state *vkms_state; if (WARN_ON(!crtc->state)) return NULL; vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); if (!vkms_state) return NULL; __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base); INIT_WORK(&vkms_state->composer_work, vkms_composer_worker); return &vkms_state->base; } static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state); __drm_atomic_helper_crtc_destroy_state(state); WARN_ON(work_pending(&vkms_state->composer_work)); kfree(vkms_state->active_planes); kfree(vkms_state); } static void vkms_atomic_crtc_reset(struct drm_crtc *crtc) { struct vkms_crtc_state *vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); if (crtc->state) vkms_atomic_crtc_destroy_state(crtc, crtc->state); __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base); if (vkms_state) INIT_WORK(&vkms_state->composer_work, vkms_composer_worker); } static const struct drm_crtc_funcs vkms_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = vkms_atomic_crtc_reset, .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state, .atomic_destroy_state = vkms_atomic_crtc_destroy_state, .enable_vblank = vkms_enable_vblank, .disable_vblank = vkms_disable_vblank, .get_vblank_timestamp = vkms_get_vblank_timestamp, .get_crc_sources = vkms_get_crc_sources, .set_crc_source = vkms_set_crc_source, .verify_crc_source = vkms_verify_crc_source, }; static int vkms_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state); struct drm_plane *plane; struct drm_plane_state *plane_state; int i = 0, ret; if (vkms_state->active_planes) return 0; ret = drm_atomic_add_affected_planes(crtc_state->state, crtc); if (ret < 0) return ret; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); WARN_ON(!plane_state); if (!plane_state->visible) continue; i++; } vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL); if (!vkms_state->active_planes) return -ENOMEM; vkms_state->num_active_planes = i; i = 0; drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) { plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane); if (!plane_state->visible) continue; vkms_state->active_planes[i++] = to_vkms_plane_state(plane_state); } return 0; } static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { 
drm_crtc_vblank_on(crtc); } static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { drm_crtc_vblank_off(crtc); } static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); /* This lock is held across the atomic commit to block vblank timer * from scheduling vkms_composer_worker until the composer is updated */ spin_lock_irq(&vkms_output->lock); } static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); if (crtc->state->event) { spin_lock(&crtc->dev->event_lock); if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event); spin_unlock(&crtc->dev->event_lock); crtc->state->event = NULL; } vkms_output->composer_state = to_vkms_crtc_state(crtc->state); spin_unlock_irq(&vkms_output->lock); } static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { .atomic_check = vkms_crtc_atomic_check, .atomic_begin = vkms_crtc_atomic_begin, .atomic_flush = vkms_crtc_atomic_flush, .atomic_enable = vkms_crtc_atomic_enable, .atomic_disable = vkms_crtc_atomic_disable, }; int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) { struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc); int ret; ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor, &vkms_crtc_funcs, NULL); if (ret) { DRM_ERROR("Failed to init CRTC\n"); return ret; } drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE); drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE); spin_lock_init(&vkms_out->lock); spin_lock_init(&vkms_out->composer_lock); vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0); if (!vkms_out->composer_workq) return -ENOMEM; return ret; } |
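/*
 * Illustrative sketch of the self-rearming hrtimer pattern that
 * vkms_vblank_simulate() uses above: forward the timer by one frame
 * period from "now", report any overrun, and keep the timer running.
 * The demo_* structure and functions are hypothetical.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct demo_vblank {
	struct hrtimer timer;
	ktime_t period_ns;
};

static enum hrtimer_restart demo_vblank_fn(struct hrtimer *timer)
{
	struct demo_vblank *v = container_of(timer, struct demo_vblank, timer);
	u64 fired = hrtimer_forward_now(timer, v->period_ns);

	if (fired != 1)
		pr_warn("demo vblank: timer overran by %llu periods\n", fired - 1);

	/* ...signal the emulated vblank to interested consumers here... */
	return HRTIMER_RESTART;	/* keep firing once per period */
}

/* Arm the emulated vblank at a given refresh rate, e.g. 60 Hz. */
static void demo_vblank_start(struct demo_vblank *v, unsigned int refresh_hz)
{
	v->period_ns = ktime_set(0, NSEC_PER_SEC / refresh_hz);
	hrtimer_init(&v->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	v->timer.function = demo_vblank_fn;
	hrtimer_start(&v->timer, v->period_ns, HRTIMER_MODE_REL);
}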
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
skb_set_owner_w(skb, sk); WARN_ON(sk->sk_send_head); sk->sk_send_head = skb; return skb_clone(sk->sk_send_head, gfp_any()); } /* * All SKB's seen here are completely headerless. It is our * job to build the DCCP header, and pass the packet down to * IP so it can do the same plus pass the packet off to the * device. */ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) { if (likely(skb != NULL)) { struct inet_sock *inet = inet_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); struct dccp_sock *dp = dccp_sk(sk); struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); struct dccp_hdr *dh; /* XXX For now we're using only 48 bits sequence numbers */ const u32 dccp_header_size = sizeof(*dh) + sizeof(struct dccp_hdr_ext) + dccp_packet_hdr_len(dcb->dccpd_type); int err, set_ack = 1; u64 ackno = dp->dccps_gsr; /* * Increment GSS here already in case the option code needs it. * Update GSS for real only if option processing below succeeds. */ dcb->dccpd_seq = ADD48(dp->dccps_gss, 1); switch (dcb->dccpd_type) { case DCCP_PKT_DATA: set_ack = 0; fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_RESET: break; case DCCP_PKT_REQUEST: set_ack = 0; /* Use ISS on the first (non-retransmitted) Request. */ if (icsk->icsk_retransmits == 0) dcb->dccpd_seq = dp->dccps_iss; fallthrough; case DCCP_PKT_SYNC: case DCCP_PKT_SYNCACK: ackno = dcb->dccpd_ack_seq; fallthrough; default: /* * Set owner/destructor: some skbs are allocated via * alloc_skb (e.g. when retransmission may happen). * Only Data, DataAck, and Reset packets should come * through here with skb->sk set. */ WARN_ON(skb->sk); skb_set_owner_w(skb, sk); break; } if (dccp_insert_options(sk, skb)) { kfree_skb(skb); return -EPROTO; } /* Build DCCP header and checksum it. */ dh = dccp_zeroed_hdr(skb, dccp_header_size); dh->dccph_type = dcb->dccpd_type; dh->dccph_sport = inet->inet_sport; dh->dccph_dport = inet->inet_dport; dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4; dh->dccph_ccval = dcb->dccpd_ccval; dh->dccph_cscov = dp->dccps_pcslen; /* XXX For now we're using only 48 bits sequence numbers */ dh->dccph_x = 1; dccp_update_gss(sk, dcb->dccpd_seq); dccp_hdr_set_seq(dh, dp->dccps_gss); if (set_ack) dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno); switch (dcb->dccpd_type) { case DCCP_PKT_REQUEST: dccp_hdr_request(skb)->dccph_req_service = dp->dccps_service; /* * Limit Ack window to ISS <= P.ackno <= GSS, so that * only Responses to Requests we sent are considered. */ dp->dccps_awl = dp->dccps_iss; break; case DCCP_PKT_RESET: dccp_hdr_reset(skb)->dccph_reset_code = dcb->dccpd_reset_code; break; } icsk->icsk_af_ops->send_check(sk, skb); if (set_ack) dccp_event_ack_sent(sk); DCCP_INC_STATS(DCCP_MIB_OUTSEGS); err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); return net_xmit_eval(err); } return -ENOBUFS; } /** * dccp_determine_ccmps - Find out about CCID-specific packet-size limits * @dp: socket to find packet size limits of * * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.), * since the RX CCID is restricted to feedback packets (Acks), which are small * in comparison with the data traffic. A value of 0 means "no current CCMPS". 
*/ static u32 dccp_determine_ccmps(const struct dccp_sock *dp) { const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid; if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL) return 0; return tx_ccid->ccid_ops->ccid_ccmps; } unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) { struct inet_connection_sock *icsk = inet_csk(sk); struct dccp_sock *dp = dccp_sk(sk); u32 ccmps = dccp_determine_ccmps(dp); u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu; /* Account for header lengths and IPv4/v6 option overhead */ cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len + sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext)); /* * Leave enough headroom for common DCCP header options. * This only considers options which may appear on DCCP-Data packets, as * per table 3 in RFC 4340, 5.8. When running out of space for other * options (eg. Ack Vector which can take up to 255 bytes), it is better * to schedule a separate Ack. Thus we leave headroom for the following: * - 1 byte for Slow Receiver (11.6) * - 6 bytes for Timestamp (13.1) * - 10 bytes for Timestamp Echo (13.3) * - 8 bytes for NDP count (7.7, when activated) * - 6 bytes for Data Checksum (9.3) * - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled) */ cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 + (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4); /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; WRITE_ONCE(dp->dccps_mss_cache, cur_mps); return cur_mps; } EXPORT_SYMBOL_GPL(dccp_sync_mss); void dccp_write_space(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible(&wq->wait); /* Should agree with poll, otherwise some programs break */ if (sock_writeable(sk)) sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } /** * dccp_wait_for_ccid - Await CCID send permission * @sk: socket to wait for * @delay: timeout in jiffies * * This is used by CCIDs which need to delay the send time in process context. */ static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) { DEFINE_WAIT(wait); long remaining; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sk->sk_write_pending++; release_sock(sk); remaining = schedule_timeout(delay); lock_sock(sk); sk->sk_write_pending--; finish_wait(sk_sleep(sk), &wait); if (signal_pending(current) || sk->sk_err) return -1; return remaining; } /** * dccp_xmit_packet - Send data packet under control of CCID * @sk: socket to send data packet on * * Transmits next-queued payload and informs CCID to account for the packet. */ static void dccp_xmit_packet(struct sock *sk) { int err, len; struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb = dccp_qpolicy_pop(sk); if (unlikely(skb == NULL)) return; len = skb->len; if (sk->sk_state == DCCP_PARTOPEN) { const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD; /* * See 8.1.5 - Handshake Completion. * * For robustness we resend Confirm options until the client has * entered OPEN. During the initial feature negotiation, the MPS * is smaller than usual, reduced by the Change/Confirm options. 
*/ if (!list_empty(&dp->dccps_featneg) && len > cur_mps) { DCCP_WARN("Payload too large (%d) for featneg.\n", len); dccp_send_ack(sk); dccp_feat_list_purge(&dp->dccps_featneg); } inet_csk_schedule_ack(sk); inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, inet_csk(sk)->icsk_rto, DCCP_RTO_MAX); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; } else if (dccp_ack_pending(sk)) { DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; } else { DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA; } err = dccp_transmit_skb(sk, skb); if (err) dccp_pr_debug("transmit_skb() returned err=%d\n", err); /* * Register this one as sent even if an error occurred. To the remote * end a local packet drop is indistinguishable from network loss, i.e. * any local drop will eventually be reported via receiver feedback. */ ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); /* * If the CCID needs to transfer additional header options out-of-band * (e.g. Ack Vectors or feature-negotiation options), it activates this * flag to schedule a Sync. The Sync will automatically incorporate all * currently pending header options, thus clearing the backlog. */ if (dp->dccps_sync_scheduled) dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); } /** * dccp_flush_write_queue - Drain queue at end of connection * @sk: socket to be drained * @time_budget: time allowed to drain the queue * * Since dccp_sendmsg queues packets without waiting for them to be sent, it may * happen that the TX queue is not empty at the end of a connection. We give the * HC-sender CCID a grace period of up to @time_budget jiffies. If this function * returns with a non-empty write queue, it will be purged later. */ void dccp_flush_write_queue(struct sock *sk, long *time_budget) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; long delay, rc; while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); switch (ccid_packet_dequeue_eval(rc)) { case CCID_PACKET_WILL_DEQUEUE_LATER: /* * If the CCID determines when to send, the next sending * time is unknown or the CCID may not even send again * (e.g. remote host crashes or lost Ack packets). */ DCCP_WARN("CCID did not manage to send all packets\n"); return; case CCID_PACKET_DELAY: delay = msecs_to_jiffies(rc); if (delay > *time_budget) return; rc = dccp_wait_for_ccid(sk, delay); if (rc < 0) return; *time_budget -= (delay - rc); /* check again if we can send now */ break; case CCID_PACKET_SEND_AT_ONCE: dccp_xmit_packet(sk); break; case CCID_PACKET_ERR: skb_dequeue(&sk->sk_write_queue); kfree_skb(skb); dccp_pr_debug("packet discarded due to err=%ld\n", rc); } } } void dccp_write_xmit(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; while ((skb = dccp_qpolicy_top(sk))) { int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); switch (ccid_packet_dequeue_eval(rc)) { case CCID_PACKET_WILL_DEQUEUE_LATER: return; case CCID_PACKET_DELAY: sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies + msecs_to_jiffies(rc)); return; case CCID_PACKET_SEND_AT_ONCE: dccp_xmit_packet(sk); break; case CCID_PACKET_ERR: dccp_qpolicy_drop(sk, skb); dccp_pr_debug("packet discarded due to err=%d\n", rc); } } } /** * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets * @sk: socket to perform retransmit on * * There are only four retransmittable packet types in DCCP: * - Request in client-REQUEST state (sec. 8.1.1), * - CloseReq in server-CLOSEREQ state (sec. 8.3), * - Close in node-CLOSING state (sec. 
8.3), * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()). * This function expects sk->sk_send_head to contain the original skb. */ int dccp_retransmit_skb(struct sock *sk) { WARN_ON(sk->sk_send_head == NULL); if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0) return -EHOSTUNREACH; /* Routing failure or similar. */ /* this count is used to distinguish original and retransmitted skb */ inet_csk(sk)->icsk_retransmits++; return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC)); } struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst, struct request_sock *req) { struct dccp_hdr *dh; struct dccp_request_sock *dreq; const u32 dccp_header_size = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_response); struct sk_buff *skb; /* sk is marked const to clearly express we dont hold socket lock. * sock_wmalloc() will atomically change sk->sk_wmem_alloc, * it is safe to promote sk to non const. */ skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1, GFP_ATOMIC); if (!skb) return NULL; skb_reserve(skb, MAX_DCCP_HEADER); skb_dst_set(skb, dst_clone(dst)); dreq = dccp_rsk(req); if (inet_rsk(req)->acked) /* increase GSS upon retransmission */ dccp_inc_seqno(&dreq->dreq_gss); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE; DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss; /* Resolve feature dependencies resulting from choice of CCID */ if (dccp_feat_server_ccid_dependencies(dreq)) goto response_failed; if (dccp_insert_options_rsk(dreq, skb)) goto response_failed; /* Build and checksum header */ dh = dccp_zeroed_hdr(skb, dccp_header_size); dh->dccph_sport = htons(inet_rsk(req)->ir_num); dh->dccph_dport = inet_rsk(req)->ir_rmt_port; dh->dccph_doff = (dccp_header_size + DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; dh->dccph_type = DCCP_PKT_RESPONSE; dh->dccph_x = 1; dccp_hdr_set_seq(dh, dreq->dreq_gss); dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr); dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; dccp_csum_outgoing(skb); /* We use `acked' to remember that a Response was already sent. */ inet_rsk(req)->acked = 1; DCCP_INC_STATS(DCCP_MIB_OUTSEGS); return skb; response_failed: kfree_skb(skb); return NULL; } EXPORT_SYMBOL_GPL(dccp_make_response); /* answer offending packet in @rcv_skb with Reset from control socket @ctl */ struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb) { struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh; struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb); const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext) + sizeof(struct dccp_hdr_reset); struct dccp_hdr_reset *dhr; struct sk_buff *skb; skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); if (skb == NULL) return NULL; skb_reserve(skb, sk->sk_prot->max_header); /* Swap the send and the receive. */ dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len); dh->dccph_type = DCCP_PKT_RESET; dh->dccph_sport = rxdh->dccph_dport; dh->dccph_dport = rxdh->dccph_sport; dh->dccph_doff = dccp_hdr_reset_len / 4; dh->dccph_x = 1; dhr = dccp_hdr_reset(skb); dhr->dccph_reset_code = dcb->dccpd_reset_code; switch (dcb->dccpd_reset_code) { case DCCP_RESET_CODE_PACKET_ERROR: dhr->dccph_reset_data[0] = rxdh->dccph_type; break; case DCCP_RESET_CODE_OPTION_ERROR: case DCCP_RESET_CODE_MANDATORY_ERROR: memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3); break; } /* * From RFC 4340, 8.3.1: * If P.ackno exists, set R.seqno := P.ackno + 1. * Else set R.seqno := 0. 
*/ if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1)); dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq); dccp_csum_outgoing(skb); return skb; } EXPORT_SYMBOL_GPL(dccp_ctl_make_reset); /* send Reset on established socket, to close or abort the connection */ int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code) { struct sk_buff *skb; /* * FIXME: what if rebuild_header fails? * Should we be doing a rebuild_header here? */ int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk); if (err != 0) return err; skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC); if (skb == NULL) return -ENOBUFS; /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, sk->sk_prot->max_header); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET; DCCP_SKB_CB(skb)->dccpd_reset_code = code; return dccp_transmit_skb(sk, skb); } /* * Do all connect socket setups that can be done AF independent. */ int dccp_connect(struct sock *sk) { struct sk_buff *skb; struct dccp_sock *dp = dccp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); struct inet_connection_sock *icsk = inet_csk(sk); sk->sk_err = 0; sock_reset_flag(sk, SOCK_DONE); dccp_sync_mss(sk, dst_mtu(dst)); /* do not connect if feature negotiation setup fails */ if (dccp_feat_finalise_settings(dccp_sk(sk))) return -EPROTO; /* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */ dp->dccps_gar = dp->dccps_iss; skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation); if (unlikely(skb == NULL)) return -ENOBUFS; /* Reserve space for headers. */ skb_reserve(skb, sk->sk_prot->max_header); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST; dccp_transmit_skb(sk, dccp_skb_entail(sk, skb)); DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS); /* Timer for repeating the REQUEST until an answer. */ icsk->icsk_retransmits = 0; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, DCCP_RTO_MAX); return 0; } EXPORT_SYMBOL_GPL(dccp_connect); void dccp_send_ack(struct sock *sk) { /* If we have been reset, we may not send again. */ if (sk->sk_state != DCCP_CLOSED) { struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); if (skb == NULL) { inet_csk_schedule_ack(sk); inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, DCCP_RTO_MAX); return; } /* Reserve space for headers */ skb_reserve(skb, sk->sk_prot->max_header); DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK; dccp_transmit_skb(sk, skb); } } EXPORT_SYMBOL_GPL(dccp_send_ack); #if 0 /* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */ void dccp_send_delayed_ack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); /* * FIXME: tune this timer. elapsed time fixes the skew, so no problem * with using 2s, and active senders also piggyback the ACK into a * DATAACK packet, so this is really for quiescent senders. */ unsigned long timeout = jiffies + 2 * HZ; /* Use new timeout only if there wasn't a older one earlier. */ if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { /* If delack timer was blocked or is about to expire, * send ACK now. 
* * FIXME: check the "about to expire" part */ if (icsk->icsk_ack.blocked) { dccp_send_ack(sk); return; } if (!time_before(timeout, icsk->icsk_ack.timeout)) timeout = icsk->icsk_ack.timeout; } icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } #endif void dccp_send_sync(struct sock *sk, const u64 ackno, const enum dccp_pkt_type pkt_type) { /* * We are not putting this on the write queue, so * dccp_transmit_skb() will set the ownership to this * sock. */ struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC); if (skb == NULL) { /* FIXME: how to make sure the sync is sent? */ DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type)); return; } /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, sk->sk_prot->max_header); DCCP_SKB_CB(skb)->dccpd_type = pkt_type; DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno; /* * Clear the flag in case the Sync was scheduled for out-of-band data, * such as carrying a long Ack Vector. */ dccp_sk(sk)->dccps_sync_scheduled = 0; dccp_transmit_skb(sk, skb); } EXPORT_SYMBOL_GPL(dccp_send_sync); /* * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under * any circumstances. */ void dccp_send_close(struct sock *sk, const int active) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC; skb = alloc_skb(sk->sk_prot->max_header, prio); if (skb == NULL) return; /* Reserve space for headers and prepare control bits. */ skb_reserve(skb, sk->sk_prot->max_header); if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait) DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ; else DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; if (active) { skb = dccp_skb_entail(sk, skb); /* * Retransmission timer for active-close: RFC 4340, 8.3 requires * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ * state can be left. The initial timeout is 2 RTTs. * Since RTT measurement is done by the CCIDs, there is no easy * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4 * is too low (200ms); we use a high value to avoid unnecessary * retransmissions when the link RTT is > 0.2 seconds. * FIXME: Let main module sample RTTs and use that instead. */ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, DCCP_TIMEOUT_INIT, DCCP_RTO_MAX); } dccp_transmit_skb(sk, skb); } |
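/*
 * Worked example (not kernel code) of the arithmetic dccp_sync_mss()
 * performs above, with illustrative numbers: an IPv4 path MTU of 1500,
 * no IP options, no CCID limit, and NDP counts and Ack Vectors disabled.
 * The 16 DCCP header bytes cover the generic header plus the extension
 * carrying 48-bit sequence numbers (the dccph_x = 1 case used above).
 */
#include <stdio.h>

#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int pmtu = 1500;		/* example path MTU */
	unsigned int ip_hdr = 20;		/* IPv4 header, no options */
	unsigned int dccp_hdr = 12 + 4;		/* generic header + 48-bit seq ext */
	unsigned int cur_mps = pmtu - ip_hdr - dccp_hdr;

	/* headroom: Slow Receiver + Timestamp + Timestamp Echo + Data Checksum */
	unsigned int headroom = ROUND_UP(1 + 6 + 10 + 0 * 8 + 6 + 0, 4);

	cur_mps -= headroom;
	printf("MPS = %u bytes\n", cur_mps);	/* 1500 - 36 - 24 = 1440 */
	return 0;
}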
14 14 14 38 37 38 15 15 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 | // SPDX-License-Identifier: GPL-2.0+ /* * NILFS inode file * * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation. * * Written by Amagai Yoshiji. * Revised by Ryusuke Konishi. * */ #include <linux/types.h> #include <linux/buffer_head.h> #include "nilfs.h" #include "mdt.h" #include "alloc.h" #include "ifile.h" /** * struct nilfs_ifile_info - on-memory private data of ifile * @mi: on-memory private data of metadata file * @palloc_cache: persistent object allocator cache of ifile */ struct nilfs_ifile_info { struct nilfs_mdt_info mi; struct nilfs_palloc_cache palloc_cache; }; static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile) { return (struct nilfs_ifile_info *)NILFS_MDT(ifile); } /** * nilfs_ifile_create_inode - create a new disk inode * @ifile: ifile inode * @out_ino: pointer to a variable to store inode number * @out_bh: buffer_head contains newly allocated disk inode * * Return Value: On success, 0 is returned and the newly allocated inode * number is stored in the place pointed by @ino, and buffer_head pointer * that contains newly allocated disk inode structure is stored in the * place pointed by @out_bh * On error, one of the following negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOSPC - No inode left. */ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct buffer_head **out_bh) { struct nilfs_palloc_req req; int ret; req.pr_entry_nr = 0; /* * 0 says find free inode from beginning * of a group. dull code!! */ req.pr_entry_bh = NULL; ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); if (!ret) { ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1, &req.pr_entry_bh); if (ret < 0) nilfs_palloc_abort_alloc_entry(ifile, &req); } if (ret < 0) { brelse(req.pr_entry_bh); return ret; } nilfs_palloc_commit_alloc_entry(ifile, &req); mark_buffer_dirty(req.pr_entry_bh); nilfs_mdt_mark_dirty(ifile); *out_ino = (ino_t)req.pr_entry_nr; *out_bh = req.pr_entry_bh; return 0; } /** * nilfs_ifile_delete_inode - delete a disk inode * @ifile: ifile inode * @ino: inode number * * Return Value: On success, 0 is returned. On error, one of the following * negative error codes is returned. * * %-EIO - I/O error. * * %-ENOMEM - Insufficient amount of memory available. * * %-ENOENT - The inode number @ino have not been allocated. 
*/ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) { struct nilfs_palloc_req req = { .pr_entry_nr = ino, .pr_entry_bh = NULL }; struct nilfs_inode *raw_inode; void *kaddr; int ret; ret = nilfs_palloc_prepare_free_entry(ifile, &req); if (!ret) { ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 0, &req.pr_entry_bh); if (ret < 0) nilfs_palloc_abort_free_entry(ifile, &req); } if (ret < 0) { brelse(req.pr_entry_bh); return ret; } kaddr = kmap_atomic(req.pr_entry_bh->b_page); raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, req.pr_entry_bh, kaddr); raw_inode->i_flags = 0; kunmap_atomic(kaddr); mark_buffer_dirty(req.pr_entry_bh); brelse(req.pr_entry_bh); nilfs_palloc_commit_free_entry(ifile, &req); return 0; } int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, struct buffer_head **out_bh) { struct super_block *sb = ifile->i_sb; int err; if (unlikely(!NILFS_VALID_INODE(sb, ino))) { nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino); return -EINVAL; } err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); if (unlikely(err)) nilfs_warn(sb, "error %d reading inode: ino=%lu", err, (unsigned long)ino); return err; } /** * nilfs_ifile_count_free_inodes - calculate free inodes count * @ifile: ifile inode * @nmaxinodes: current maximum of available inodes count [out] * @nfreeinodes: free inodes count [out] */ int nilfs_ifile_count_free_inodes(struct inode *ifile, u64 *nmaxinodes, u64 *nfreeinodes) { u64 nused; int err; *nmaxinodes = 0; *nfreeinodes = 0; nused = atomic64_read(&NILFS_I(ifile)->i_root->inodes_count); err = nilfs_palloc_count_max_entries(ifile, nused, nmaxinodes); if (likely(!err)) *nfreeinodes = *nmaxinodes - nused; return err; } /** * nilfs_ifile_read - read or get ifile inode * @sb: super block instance * @root: root object * @inode_size: size of an inode * @raw_inode: on-disk ifile inode * @inodep: buffer to store the inode */ int nilfs_ifile_read(struct super_block *sb, struct nilfs_root *root, size_t inode_size, struct nilfs_inode *raw_inode, struct inode **inodep) { struct inode *ifile; int err; ifile = nilfs_iget_locked(sb, root, NILFS_IFILE_INO); if (unlikely(!ifile)) return -ENOMEM; if (!(ifile->i_state & I_NEW)) goto out; err = nilfs_mdt_init(ifile, NILFS_MDT_GFP, sizeof(struct nilfs_ifile_info)); if (err) goto failed; err = nilfs_palloc_init_blockgroup(ifile, inode_size); if (err) goto failed; nilfs_palloc_setup_cache(ifile, &NILFS_IFILE_I(ifile)->palloc_cache); err = nilfs_read_inode_common(ifile, raw_inode); if (err) goto failed; unlock_new_inode(ifile); out: *inodep = ifile; return 0; failed: iget_failed(ifile); return err; } |
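/*
 * Sketch of a hypothetical caller of nilfs_ifile_create_inode() above:
 * allocate a slot in the ifile, map the on-disk inode in the returned
 * buffer, initialise it, then release the buffer.  Transaction handling
 * and full error paths are omitted; demo_new_ifile_entry() is not a
 * real NILFS function.
 */
static int demo_new_ifile_entry(struct inode *ifile)
{
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	void *kaddr;
	ino_t ino;
	int err;

	err = nilfs_ifile_create_inode(ifile, &ino, &bh);
	if (err)
		return err;	/* -EIO, -ENOMEM or -ENOSPC, as documented above */

	kaddr = kmap_atomic(bh->b_page);
	raw_inode = nilfs_palloc_block_get_entry(ifile, ino, bh, kaddr);
	raw_inode->i_flags = 0;	/* ...fill in the rest of the on-disk inode... */
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	brelse(bh);
	return 0;
}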
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_EXTEND_H
#define _NF_CONNTRACK_EXTEND_H

#include <linux/slab.h>

#include <net/netfilter/nf_conntrack.h>

enum nf_ct_ext_id {
	NF_CT_EXT_HELPER,
#if IS_ENABLED(CONFIG_NF_NAT)
	NF_CT_EXT_NAT,
#endif
	NF_CT_EXT_SEQADJ,
	NF_CT_EXT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	NF_CT_EXT_ECACHE,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	NF_CT_EXT_TSTAMP,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	NF_CT_EXT_TIMEOUT,
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	NF_CT_EXT_LABELS,
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
	NF_CT_EXT_SYNPROXY,
#endif
#if IS_ENABLED(CONFIG_NET_ACT_CT)
	NF_CT_EXT_ACT_CT,
#endif
	NF_CT_EXT_NUM,
};

/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
	u8 offset[NF_CT_EXT_NUM];
	u8 len;
	unsigned int gen_id;
	char data[] __aligned(8);
};

static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
{
	return !!ext->offset[id];
}

static inline bool nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
{
	return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}

void *__nf_ct_ext_find(const struct nf_ct_ext *ext, u8 id);

static inline void *nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
	struct nf_ct_ext *ext = ct->ext;

	if (!ext || !__nf_ct_ext_exist(ext, id))
		return NULL;

	if (unlikely(ext->gen_id))
		return __nf_ct_ext_find(ext, id);

	return (void *)ct->ext + ct->ext->offset[id];
}

/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);

/* ext genid.  if ext->id != ext_genid, extensions cannot be used
 * anymore unless conntrack has CONFIRMED bit set.
 */
extern atomic_t nf_conntrack_ext_genid;
void nf_ct_ext_bump_genid(void);

#endif /* _NF_CONNTRACK_EXTEND_H */
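/*
 * Usage sketch for the extension API declared above, mirroring what the
 * accounting wrappers in nf_conntrack_acct.h do: an extension is added
 * while the conntrack is still unconfirmed and looked up later from
 * packet context.  The demo_* wrappers themselves are hypothetical.
 */
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_extend.h>

static struct nf_conn_acct *demo_acct_attach(struct nf_conn *ct, gfp_t gfp)
{
	/* Adding is only valid before the conntrack is confirmed. */
	return nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
}

static struct nf_conn_acct *demo_acct_lookup(const struct nf_conn *ct)
{
	/* Returns NULL if accounting was never enabled for this entry. */
	return nf_ct_ext_find(ct, NF_CT_EXT_ACCT);
}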
// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC remote transport endpoint record management
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include "ar-internal.h"

static const struct sockaddr_rxrpc rxrpc_null_addr;

/*
 * Hash a peer key.
*/ static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local, const struct sockaddr_rxrpc *srx) { const u16 *p; unsigned int i, size; unsigned long hash_key; _enter(""); hash_key = (unsigned long)local / __alignof__(*local); hash_key += srx->transport_type; hash_key += srx->transport_len; hash_key += srx->transport.family; switch (srx->transport.family) { case AF_INET: hash_key += (u16 __force)srx->transport.sin.sin_port; size = sizeof(srx->transport.sin.sin_addr); p = (u16 *)&srx->transport.sin.sin_addr; break; #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: hash_key += (u16 __force)srx->transport.sin.sin_port; size = sizeof(srx->transport.sin6.sin6_addr); p = (u16 *)&srx->transport.sin6.sin6_addr; break; #endif default: WARN(1, "AF_RXRPC: Unsupported transport address family\n"); return 0; } /* Step through the peer address in 16-bit portions for speed */ for (i = 0; i < size; i += sizeof(*p), p++) hash_key += *p; _leave(" 0x%lx", hash_key); return hash_key; } /* * Compare a peer to a key. Return -ve, 0 or +ve to indicate less than, same * or greater than. * * Unfortunately, the primitives in linux/hashtable.h don't allow for sorted * buckets and mid-bucket insertion, so we don't make full use of this * information at this point. */ static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer, struct rxrpc_local *local, const struct sockaddr_rxrpc *srx, unsigned long hash_key) { long diff; diff = ((peer->hash_key - hash_key) ?: ((unsigned long)peer->local - (unsigned long)local) ?: (peer->srx.transport_type - srx->transport_type) ?: (peer->srx.transport_len - srx->transport_len) ?: (peer->srx.transport.family - srx->transport.family)); if (diff != 0) return diff; switch (srx->transport.family) { case AF_INET: return ((u16 __force)peer->srx.transport.sin.sin_port - (u16 __force)srx->transport.sin.sin_port) ?: memcmp(&peer->srx.transport.sin.sin_addr, &srx->transport.sin.sin_addr, sizeof(struct in_addr)); #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: return ((u16 __force)peer->srx.transport.sin6.sin6_port - (u16 __force)srx->transport.sin6.sin6_port) ?: memcmp(&peer->srx.transport.sin6.sin6_addr, &srx->transport.sin6.sin6_addr, sizeof(struct in6_addr)); #endif default: BUG(); } } /* * Look up a remote transport endpoint for the specified address using RCU. */ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu( struct rxrpc_local *local, const struct sockaddr_rxrpc *srx, unsigned long hash_key) { struct rxrpc_peer *peer; struct rxrpc_net *rxnet = local->rxnet; hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) { if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 && refcount_read(&peer->ref) > 0) return peer; } return NULL; } /* * Look up a remote transport endpoint for the specified address using RCU. 
*/ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local, const struct sockaddr_rxrpc *srx) { struct rxrpc_peer *peer; unsigned long hash_key = rxrpc_peer_hash_key(local, srx); peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); if (peer) _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); return peer; } /* * assess the MTU size for the network interface through which this peer is * reached */ static void rxrpc_assess_MTU_size(struct rxrpc_local *local, struct rxrpc_peer *peer) { struct net *net = local->net; struct dst_entry *dst; struct rtable *rt; struct flowi fl; struct flowi4 *fl4 = &fl.u.ip4; #ifdef CONFIG_AF_RXRPC_IPV6 struct flowi6 *fl6 = &fl.u.ip6; #endif peer->if_mtu = 1500; memset(&fl, 0, sizeof(fl)); switch (peer->srx.transport.family) { case AF_INET: rt = ip_route_output_ports( net, fl4, NULL, peer->srx.transport.sin.sin_addr.s_addr, 0, htons(7000), htons(7001), IPPROTO_UDP, 0, 0); if (IS_ERR(rt)) { _leave(" [route err %ld]", PTR_ERR(rt)); return; } dst = &rt->dst; break; #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: fl6->flowi6_iif = LOOPBACK_IFINDEX; fl6->flowi6_scope = RT_SCOPE_UNIVERSE; fl6->flowi6_proto = IPPROTO_UDP; memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr, sizeof(struct in6_addr)); fl6->fl6_dport = htons(7001); fl6->fl6_sport = htons(7000); dst = ip6_route_output(net, NULL, fl6); if (dst->error) { _leave(" [route err %d]", dst->error); return; } break; #endif default: BUG(); } peer->if_mtu = dst_mtu(dst); dst_release(dst); _leave(" [if_mtu %u]", peer->if_mtu); } /* * Allocate a peer. */ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp, enum rxrpc_peer_trace why) { struct rxrpc_peer *peer; _enter(""); peer = kzalloc(sizeof(struct rxrpc_peer), gfp); if (peer) { refcount_set(&peer->ref, 1); peer->local = rxrpc_get_local(local, rxrpc_local_get_peer); INIT_HLIST_HEAD(&peer->error_targets); peer->service_conns = RB_ROOT; seqlock_init(&peer->service_conn_lock); spin_lock_init(&peer->lock); spin_lock_init(&peer->rtt_input_lock); peer->debug_id = atomic_inc_return(&rxrpc_debug_id); rxrpc_peer_init_rtt(peer); peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW; trace_rxrpc_peer(peer->debug_id, 1, why); } _leave(" = %p", peer); return peer; } /* * Initialise peer record. */ static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer, unsigned long hash_key) { peer->hash_key = hash_key; rxrpc_assess_MTU_size(local, peer); peer->mtu = peer->if_mtu; peer->rtt_last_req = ktime_get_real(); switch (peer->srx.transport.family) { case AF_INET: peer->hdrsize = sizeof(struct iphdr); break; #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: peer->hdrsize = sizeof(struct ipv6hdr); break; #endif default: BUG(); } switch (peer->srx.transport_type) { case SOCK_DGRAM: peer->hdrsize += sizeof(struct udphdr); break; default: BUG(); } peer->hdrsize += sizeof(struct rxrpc_wire_header); peer->maxdata = peer->mtu - peer->hdrsize; } /* * Set up a new peer. */ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, struct sockaddr_rxrpc *srx, unsigned long hash_key, gfp_t gfp) { struct rxrpc_peer *peer; _enter(""); peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client); if (peer) { memcpy(&peer->srx, srx, sizeof(*srx)); rxrpc_init_peer(local, peer, hash_key); } _leave(" = %p", peer); return peer; } static void rxrpc_free_peer(struct rxrpc_peer *peer) { trace_rxrpc_peer(peer->debug_id, 0, rxrpc_peer_free); rxrpc_put_local(peer->local, rxrpc_local_put_peer); kfree_rcu(peer, rcu); } /* * Set up a new incoming peer. 
There shouldn't be any other matching peers * since we've already done a search in the list from the non-reentrant context * (the data_ready handler) that is the only place we can add new peers. */ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) { struct rxrpc_net *rxnet = local->rxnet; unsigned long hash_key; hash_key = rxrpc_peer_hash_key(local, &peer->srx); rxrpc_init_peer(local, peer, hash_key); spin_lock(&rxnet->peer_hash_lock); hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new); spin_unlock(&rxnet->peer_hash_lock); } /* * obtain a remote transport endpoint for the specified address */ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, struct sockaddr_rxrpc *srx, gfp_t gfp) { struct rxrpc_peer *peer, *candidate; struct rxrpc_net *rxnet = local->rxnet; unsigned long hash_key = rxrpc_peer_hash_key(local, srx); _enter("{%pISp}", &srx->transport); /* search the peer list first */ rcu_read_lock(); peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client)) peer = NULL; rcu_read_unlock(); if (!peer) { /* The peer is not yet present in hash - create a candidate * for a new record and then redo the search. */ candidate = rxrpc_create_peer(local, srx, hash_key, gfp); if (!candidate) { _leave(" = NULL [nomem]"); return NULL; } spin_lock(&rxnet->peer_hash_lock); /* Need to check that we aren't racing with someone else */ peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key); if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_lookup_client)) peer = NULL; if (!peer) { hash_add_rcu(rxnet->peer_hash, &candidate->hash_link, hash_key); list_add_tail(&candidate->keepalive_link, &rxnet->peer_keepalive_new); } spin_unlock(&rxnet->peer_hash_lock); if (peer) rxrpc_free_peer(candidate); else peer = candidate; } _leave(" = %p {u=%d}", peer, refcount_read(&peer->ref)); return peer; } /* * Get a ref on a peer record. */ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why) { int r; __refcount_inc(&peer->ref, &r); trace_rxrpc_peer(peer->debug_id, r + 1, why); return peer; } /* * Get a ref on a peer record unless its usage has already reached 0. */ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer, enum rxrpc_peer_trace why) { int r; if (peer) { if (__refcount_inc_not_zero(&peer->ref, &r)) trace_rxrpc_peer(peer->debug_id, r + 1, why); else peer = NULL; } return peer; } /* * Discard a peer record. */ static void __rxrpc_put_peer(struct rxrpc_peer *peer) { struct rxrpc_net *rxnet = peer->local->rxnet; ASSERT(hlist_empty(&peer->error_targets)); spin_lock(&rxnet->peer_hash_lock); hash_del_rcu(&peer->hash_link); list_del_init(&peer->keepalive_link); spin_unlock(&rxnet->peer_hash_lock); rxrpc_free_peer(peer); } /* * Drop a ref on a peer record. */ void rxrpc_put_peer(struct rxrpc_peer *peer, enum rxrpc_peer_trace why) { unsigned int debug_id; bool dead; int r; if (peer) { debug_id = peer->debug_id; dead = __refcount_dec_and_test(&peer->ref, &r); trace_rxrpc_peer(debug_id, r - 1, why); if (dead) __rxrpc_put_peer(peer); } } /* * Make sure all peer records have been discarded. 
*/ void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet) { struct rxrpc_peer *peer; int i; for (i = 0; i < HASH_SIZE(rxnet->peer_hash); i++) { if (hlist_empty(&rxnet->peer_hash[i])) continue; hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) { pr_err("Leaked peer %u {%u} %pISp\n", peer->debug_id, refcount_read(&peer->ref), &peer->srx.transport); } } } /** * rxrpc_kernel_get_call_peer - Get the peer address of a call * @sock: The socket on which the call is in progress. * @call: The call to query * * Get a record for the remote peer in a call. */ struct rxrpc_peer *rxrpc_kernel_get_call_peer(struct socket *sock, struct rxrpc_call *call) { return call->peer; } EXPORT_SYMBOL(rxrpc_kernel_get_call_peer); /** * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT * @peer: The peer to query * * Get the call's peer smoothed RTT in uS or UINT_MAX if we have no samples. */ unsigned int rxrpc_kernel_get_srtt(const struct rxrpc_peer *peer) { return peer->rtt_count > 0 ? peer->srtt_us >> 3 : UINT_MAX; } EXPORT_SYMBOL(rxrpc_kernel_get_srtt); /** * rxrpc_kernel_remote_srx - Get the address of a peer * @peer: The peer to query * * Get a pointer to the address from a peer record. The caller is responsible * for making sure that the address is not deallocated. */ const struct sockaddr_rxrpc *rxrpc_kernel_remote_srx(const struct rxrpc_peer *peer) { return peer ? &peer->srx : &rxrpc_null_addr; } EXPORT_SYMBOL(rxrpc_kernel_remote_srx); /** * rxrpc_kernel_remote_addr - Get the peer transport address of a call * @peer: The peer to query * * Get a pointer to the transport address from a peer record. The caller is * responsible for making sure that the address is not deallocated. */ const struct sockaddr *rxrpc_kernel_remote_addr(const struct rxrpc_peer *peer) { return (const struct sockaddr *) (peer ? &peer->srx.transport : &rxrpc_null_addr.transport); } EXPORT_SYMBOL(rxrpc_kernel_remote_addr); |
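/*
 * Editor's note: the peer lookup above keys its hash table by the local
 * endpoint plus the remote transport address, folding the scalar fields into
 * the key and then adding the address in 16-bit chunks. The sketch below
 * reproduces that shape in user space under invented names (struct fake_key,
 * hash_key); it illustrates the hashing approach only, handles just the
 * AF_INET case, and is not the kernel function.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct fake_key {
	unsigned short transport_type;
	unsigned short transport_len;
	unsigned short family;		/* AF_INET here */
	unsigned short port;		/* network byte order */
	struct in_addr addr;
};

static unsigned long hash_key(const void *local, const struct fake_key *k)
{
	const uint16_t *p = (const uint16_t *)&k->addr;
	unsigned long h;
	size_t i;

	h  = (unsigned long)local / __alignof__(long);
	h += k->transport_type;
	h += k->transport_len;
	h += k->family;
	h += k->port;

	/* Step through the address in 16-bit portions, as the kernel does. */
	for (i = 0; i < sizeof(k->addr); i += sizeof(*p), p++)
		h += *p;
	return h;
}

int main(void)
{
	struct fake_key k = {
		.transport_type = 2,	/* SOCK_DGRAM */
		.transport_len	= 16,
		.family		= AF_INET,
		.port		= htons(7001),
	};
	int local;			/* stands in for the local endpoint pointer */

	inet_pton(AF_INET, "192.0.2.1", &k.addr);
	printf("hash key = %#lx\n", hash_key(&local, &k));
	return 0;
}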
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM smc #if !defined(_TRACE_SMC_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SMC_H #include <linux/ipv6.h> #include <linux/tcp.h> #include <linux/tracepoint.h> #include <net/ipv6.h> #include "smc.h" #include "smc_core.h" TRACE_EVENT(smc_switch_to_fallback, TP_PROTO(const struct smc_sock *smc, int fallback_rsn), TP_ARGS(smc, fallback_rsn), TP_STRUCT__entry( __field(const void *, sk) __field(const void *, clcsk) __field(u64, net_cookie) __field(int, fallback_rsn) ), TP_fast_assign( const struct sock *sk = &smc->sk; const struct sock *clcsk = smc->clcsock->sk; __entry->sk = sk; __entry->clcsk = clcsk; __entry->net_cookie = sock_net(sk)->net_cookie; __entry->fallback_rsn = fallback_rsn; ), TP_printk("sk=%p clcsk=%p net=%llu fallback_rsn=%d", __entry->sk, __entry->clcsk, __entry->net_cookie, __entry->fallback_rsn) ); DECLARE_EVENT_CLASS(smc_msg_event, TP_PROTO(const struct smc_sock *smc, size_t len), TP_ARGS(smc, len), TP_STRUCT__entry( __field(const void *, smc) __field(u64, net_cookie) __field(size_t, len) __string(name, smc->conn.lnk->ibname) ), TP_fast_assign( const struct sock *sk = &smc->sk; __entry->smc = smc; __entry->net_cookie = sock_net(sk)->net_cookie; __entry->len = len; __assign_str(name, smc->conn.lnk->ibname); ), TP_printk("smc=%p net=%llu len=%zu dev=%s", __entry->smc, __entry->net_cookie, __entry->len, __get_str(name)) ); DEFINE_EVENT(smc_msg_event, smc_tx_sendmsg, TP_PROTO(const struct smc_sock *smc, size_t len), TP_ARGS(smc, len) ); DEFINE_EVENT(smc_msg_event, smc_rx_recvmsg, TP_PROTO(const struct smc_sock *smc, size_t len), TP_ARGS(smc, len) ); TRACE_EVENT(smcr_link_down, TP_PROTO(const struct smc_link *lnk, void *location), TP_ARGS(lnk, location), TP_STRUCT__entry( __field(const void *, lnk) __field(const void *, lgr) __field(u64, net_cookie) __field(int, state) __string(name, lnk->ibname) __field(void *, location) ), TP_fast_assign( const struct smc_link_group *lgr = lnk->lgr; __entry->lnk = lnk; __entry->lgr = lgr; __entry->net_cookie = lgr->net->net_cookie; __entry->state = lnk->state; __assign_str(name, lnk->ibname); __entry->location = location; ), TP_printk("lnk=%p lgr=%p net=%llu state=%d dev=%s location=%pS", __entry->lnk, __entry->lgr, __entry->net_cookie, __entry->state, __get_str(name), __entry->location) ); #endif /* _TRACE_SMC_H */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE smc_tracepoint #include <trace/define_trace.h>
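/*
 * Editor's note: once this header is built into the kernel, the events it
 * defines appear under tracefs as events/smc/<event name>. The user-space
 * sketch below enables smc_tx_sendmsg and echoes trace_pipe until interrupted.
 * The mount point (assumed /sys/kernel/tracing; some systems use
 * /sys/kernel/debug/tracing), the need for root privileges, and the helper
 * name write_str are assumptions for illustration.
 */
#include <stdio.h>

#define TRACEFS "/sys/kernel/tracing"

static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(s, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	if (write_str(TRACEFS "/events/smc/smc_tx_sendmsg/enable", "1")) {
		perror("enable smc_tx_sendmsg");
		return 1;
	}

	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		perror("trace_pipe");
		return 1;
	}

	/* Each line follows the TP_printk() format: "smc=... net=... len=... dev=..." */
	while (fgets(line, sizeof(line), pipe))	/* blocks; Ctrl-C to stop */
		fputs(line, stdout);

	fclose(pipe);
	return 0;
}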
// SPDX-License-Identifier: GPL-2.0-only /* * vivid-vid-out.c - video output support functions. * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/videodev2.h> #include <linux/v4l2-dv-timings.h> #include <media/v4l2-common.h> #include <media/v4l2-event.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-rect.h> #include "vivid-core.h" #include "vivid-vid-common.h" #include "vivid-kthread-out.h" #include "vivid-vid-out.h" static int vid_out_queue_setup(struct vb2_queue *vq, unsigned *nbuffers, unsigned *nplanes, unsigned sizes[], struct device *alloc_devs[]) { struct vivid_dev *dev = vb2_get_drv_priv(vq); const struct vivid_fmt *vfmt = dev->fmt_out; unsigned planes = vfmt->buffers; unsigned h = dev->fmt_out_rect.height; unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0]; unsigned p; for (p = vfmt->buffers; p < vfmt->planes; p++) size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] + vfmt->data_offset[p]; if (dev->field_out == V4L2_FIELD_ALTERNATE) { /* * You cannot use write() with FIELD_ALTERNATE since the field * information (TOP/BOTTOM) cannot be passed to the kernel. */ if (vb2_fileio_is_active(vq)) return -EINVAL; } if (dev->queue_setup_error) { /* * Error injection: test what happens if queue_setup() returns * an error. */ dev->queue_setup_error = false; return -EINVAL; } if (*nplanes) { /* * Check if the number of requested planes matches * the number of planes in the current format. You can't mix that. */ if (*nplanes != planes) return -EINVAL; if (sizes[0] < size) return -EINVAL; for (p = 1; p < planes; p++) { if (sizes[p] < dev->bytesperline_out[p] * h + vfmt->data_offset[p]) return -EINVAL; } } else { for (p = 0; p < planes; p++) sizes[p] = p ?
dev->bytesperline_out[p] * h + vfmt->data_offset[p] : size; } *nplanes = planes; dprintk(dev, 1, "%s: count=%u\n", __func__, *nbuffers); for (p = 0; p < planes; p++) dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]); return 0; } static int vid_out_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); dprintk(dev, 1, "%s\n", __func__); if (dev->field_out != V4L2_FIELD_ALTERNATE) vbuf->field = dev->field_out; else if (vbuf->field != V4L2_FIELD_TOP && vbuf->field != V4L2_FIELD_BOTTOM) return -EINVAL; return 0; } static int vid_out_buf_prepare(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); const struct vivid_fmt *vfmt = dev->fmt_out; unsigned int planes = vfmt->buffers; unsigned int h = dev->fmt_out_rect.height; unsigned int size = dev->bytesperline_out[0] * h; unsigned p; for (p = vfmt->buffers; p < vfmt->planes; p++) size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; dprintk(dev, 1, "%s\n", __func__); if (WARN_ON(NULL == dev->fmt_out)) return -EINVAL; if (dev->buf_prepare_error) { /* * Error injection: test what happens if buf_prepare() returns * an error. */ dev->buf_prepare_error = false; return -EINVAL; } for (p = 0; p < planes; p++) { if (p) size = dev->bytesperline_out[p] * h; size += vb->planes[p].data_offset; if (vb2_get_plane_payload(vb, p) < size) { dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %u)\n", __func__, p, vb2_get_plane_payload(vb, p), size); return -EINVAL; } } return 0; } static void vid_out_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb); dprintk(dev, 1, "%s\n", __func__); spin_lock(&dev->slock); list_add_tail(&buf->list, &dev->vid_out_active); spin_unlock(&dev->slock); } static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count) { struct vivid_dev *dev = vb2_get_drv_priv(vq); int err; if (vb2_is_streaming(&dev->vb_vid_cap_q)) dev->can_loop_video = vivid_vid_can_loop(dev); dev->vid_out_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); if (dev->start_streaming_error) { dev->start_streaming_error = false; err = -EINVAL; } else { err = vivid_start_generating_vid_out(dev, &dev->vid_out_streaming); } if (err) { struct vivid_buffer *buf, *tmp; list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } } return err; } /* abort streaming and wait for last buffer */ static void vid_out_stop_streaming(struct vb2_queue *vq) { struct vivid_dev *dev = vb2_get_drv_priv(vq); dprintk(dev, 1, "%s\n", __func__); vivid_stop_generating_vid_out(dev, &dev->vid_out_streaming); dev->can_loop_video = false; } static void vid_out_buf_request_complete(struct vb2_buffer *vb) { struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_out); } const struct vb2_ops vivid_vid_out_qops = { .queue_setup = vid_out_queue_setup, .buf_out_validate = vid_out_buf_out_validate, .buf_prepare = vid_out_buf_prepare, .buf_queue = vid_out_buf_queue, .start_streaming = vid_out_start_streaming, .stop_streaming = vid_out_stop_streaming, .buf_request_complete = vid_out_buf_request_complete, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; /* * Called whenever the format has to be reset which 
can occur when * changing outputs, standard, timings, etc. */ void vivid_update_format_out(struct vivid_dev *dev) { struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt; unsigned size, p; u64 pixelclock; switch (dev->output_type[dev->output]) { case SVID: default: dev->field_out = dev->tv_field_out; dev->sink_rect.width = 720; if (dev->std_out & V4L2_STD_525_60) { dev->sink_rect.height = 480; dev->timeperframe_vid_out = (struct v4l2_fract) { 1001, 30000 }; dev->service_set_out = V4L2_SLICED_CAPTION_525; } else { dev->sink_rect.height = 576; dev->timeperframe_vid_out = (struct v4l2_fract) { 1000, 25000 }; dev->service_set_out = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B; } dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M; break; case HDMI: dev->sink_rect.width = bt->width; dev->sink_rect.height = bt->height; size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt); if (can_reduce_fps(bt) && (bt->flags & V4L2_DV_FL_REDUCED_FPS)) pixelclock = div_u64(bt->pixelclock * 1000, 1001); else pixelclock = bt->pixelclock; dev->timeperframe_vid_out = (struct v4l2_fract) { size / 100, (u32)pixelclock / 100 }; if (bt->interlaced) dev->field_out = V4L2_FIELD_ALTERNATE; else dev->field_out = V4L2_FIELD_NONE; if (!dev->dvi_d_out && (bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) { if (bt->width == 720 && bt->height <= 576) dev->colorspace_out = V4L2_COLORSPACE_SMPTE170M; else dev->colorspace_out = V4L2_COLORSPACE_REC709; } else { dev->colorspace_out = V4L2_COLORSPACE_SRGB; } break; } dev->xfer_func_out = V4L2_XFER_FUNC_DEFAULT; dev->ycbcr_enc_out = V4L2_YCBCR_ENC_DEFAULT; dev->hsv_enc_out = V4L2_HSV_ENC_180; dev->quantization_out = V4L2_QUANTIZATION_DEFAULT; dev->compose_out = dev->sink_rect; dev->compose_bounds_out = dev->sink_rect; dev->crop_out = dev->compose_out; if (V4L2_FIELD_HAS_T_OR_B(dev->field_out)) dev->crop_out.height /= 2; dev->fmt_out_rect = dev->crop_out; for (p = 0; p < dev->fmt_out->planes; p++) dev->bytesperline_out[p] = (dev->sink_rect.width * dev->fmt_out->bit_depth[p]) / 8; } /* Map the field to something that is valid for the current output */ static enum v4l2_field vivid_field_out(struct vivid_dev *dev, enum v4l2_field field) { if (vivid_is_svid_out(dev)) { switch (field) { case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_ALTERNATE: return field; case V4L2_FIELD_INTERLACED: default: return V4L2_FIELD_INTERLACED; } } if (vivid_is_hdmi_out(dev)) return dev->dv_timings_out.bt.interlaced ? V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE; return V4L2_FIELD_NONE; } static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev) { if (vivid_is_svid_out(dev)) return (dev->std_out & V4L2_STD_525_60) ? TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; if (vivid_is_hdmi_out(dev) && dev->sink_rect.width == 720 && dev->sink_rect.height <= 576) return dev->sink_rect.height == 480 ? 
TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL; return TPG_PIXEL_ASPECT_SQUARE; } int vivid_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; const struct vivid_fmt *fmt = dev->fmt_out; unsigned p; mp->width = dev->fmt_out_rect.width; mp->height = dev->fmt_out_rect.height; mp->field = dev->field_out; mp->pixelformat = fmt->fourcc; mp->colorspace = dev->colorspace_out; mp->xfer_func = dev->xfer_func_out; mp->ycbcr_enc = dev->ycbcr_enc_out; mp->quantization = dev->quantization_out; mp->num_planes = fmt->buffers; for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; mp->plane_fmt[p].sizeimage = mp->plane_fmt[p].bytesperline * mp->height + fmt->data_offset[p]; } for (p = fmt->buffers; p < fmt->planes; p++) { unsigned stride = dev->bytesperline_out[p]; mp->plane_fmt[0].sizeimage += (stride * mp->height) / fmt->vdownsampling[p]; } return 0; } int vivid_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_bt_timings *bt = &dev->dv_timings_out.bt; struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; struct v4l2_plane_pix_format *pfmt = mp->plane_fmt; const struct vivid_fmt *fmt; unsigned bytesperline, max_bpl; unsigned factor = 1; unsigned w, h; unsigned p; fmt = vivid_get_format(dev, mp->pixelformat); if (!fmt) { dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n", mp->pixelformat); mp->pixelformat = V4L2_PIX_FMT_YUYV; fmt = vivid_get_format(dev, mp->pixelformat); } mp->field = vivid_field_out(dev, mp->field); if (vivid_is_svid_out(dev)) { w = 720; h = (dev->std_out & V4L2_STD_525_60) ? 480 : 576; } else { w = dev->sink_rect.width; h = dev->sink_rect.height; } if (V4L2_FIELD_HAS_T_OR_B(mp->field)) factor = 2; if (!dev->has_scaler_out && !dev->has_crop_out && !dev->has_compose_out) { mp->width = w; mp->height = h / factor; } else { struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor }; v4l2_rect_set_min_size(&r, &vivid_min_rect); v4l2_rect_set_max_size(&r, &vivid_max_rect); if (dev->has_scaler_out && !dev->has_crop_out) { struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h }; v4l2_rect_set_max_size(&r, &max_r); } else if (!dev->has_scaler_out && dev->has_compose_out && !dev->has_crop_out) { v4l2_rect_set_max_size(&r, &dev->sink_rect); } else if (!dev->has_scaler_out && !dev->has_compose_out) { v4l2_rect_set_min_size(&r, &dev->sink_rect); } mp->width = r.width; mp->height = r.height / factor; } /* This driver supports custom bytesperline values */ mp->num_planes = fmt->buffers; for (p = 0; p < fmt->buffers; p++) { /* Calculate the minimum supported bytesperline value */ bytesperline = (mp->width * fmt->bit_depth[p]) >> 3; /* Calculate the maximum supported bytesperline value */ max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3; if (pfmt[p].bytesperline > max_bpl) pfmt[p].bytesperline = max_bpl; if (pfmt[p].bytesperline < bytesperline) pfmt[p].bytesperline = bytesperline; pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / fmt->vdownsampling[p] + fmt->data_offset[p]; memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); } for (p = fmt->buffers; p < fmt->planes; p++) pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height * (fmt->bit_depth[p] / fmt->vdownsampling[p])) / (fmt->bit_depth[0] / fmt->vdownsampling[0]); mp->xfer_func = V4L2_XFER_FUNC_DEFAULT; mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; mp->quantization = V4L2_QUANTIZATION_DEFAULT; if 
(vivid_is_svid_out(dev)) { mp->colorspace = V4L2_COLORSPACE_SMPTE170M; } else if (dev->dvi_d_out || !(bt->flags & V4L2_DV_FL_IS_CE_VIDEO)) { mp->colorspace = V4L2_COLORSPACE_SRGB; if (dev->dvi_d_out) mp->quantization = V4L2_QUANTIZATION_LIM_RANGE; } else if (bt->width == 720 && bt->height <= 576) { mp->colorspace = V4L2_COLORSPACE_SMPTE170M; } else if (mp->colorspace != V4L2_COLORSPACE_SMPTE170M && mp->colorspace != V4L2_COLORSPACE_REC709 && mp->colorspace != V4L2_COLORSPACE_OPRGB && mp->colorspace != V4L2_COLORSPACE_BT2020 && mp->colorspace != V4L2_COLORSPACE_SRGB) { mp->colorspace = V4L2_COLORSPACE_REC709; } memset(mp->reserved, 0, sizeof(mp->reserved)); return 0; } int vivid_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp; struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_out; struct v4l2_rect *compose = &dev->compose_out; struct vb2_queue *q = &dev->vb_vid_out_q; int ret = vivid_try_fmt_vid_out(file, priv, f); unsigned factor = 1; unsigned p; if (ret < 0) return ret; if (vb2_is_busy(q) && (vivid_is_svid_out(dev) || mp->width != dev->fmt_out_rect.width || mp->height != dev->fmt_out_rect.height || mp->pixelformat != dev->fmt_out->fourcc || mp->field != dev->field_out)) { dprintk(dev, 1, "%s device busy\n", __func__); return -EBUSY; } /* * Allow for changing the colorspace on the fly. Useful for testing * purposes, and it is something that HDMI transmitters are able * to do. */ if (vb2_is_busy(q)) goto set_colorspace; dev->fmt_out = vivid_get_format(dev, mp->pixelformat); if (V4L2_FIELD_HAS_T_OR_B(mp->field)) factor = 2; if (dev->has_scaler_out || dev->has_crop_out || dev->has_compose_out) { struct v4l2_rect r = { 0, 0, mp->width, mp->height }; if (dev->has_scaler_out) { if (dev->has_crop_out) v4l2_rect_map_inside(crop, &r); else *crop = r; if (dev->has_compose_out && !dev->has_crop_out) { struct v4l2_rect min_r = { 0, 0, r.width / MAX_ZOOM, factor * r.height / MAX_ZOOM }; struct v4l2_rect max_r = { 0, 0, r.width * MAX_ZOOM, factor * r.height * MAX_ZOOM }; v4l2_rect_set_min_size(compose, &min_r); v4l2_rect_set_max_size(compose, &max_r); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); } else if (dev->has_compose_out) { struct v4l2_rect min_r = { 0, 0, crop->width / MAX_ZOOM, factor * crop->height / MAX_ZOOM }; struct v4l2_rect max_r = { 0, 0, crop->width * MAX_ZOOM, factor * crop->height * MAX_ZOOM }; v4l2_rect_set_min_size(compose, &min_r); v4l2_rect_set_max_size(compose, &max_r); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); } } else if (dev->has_compose_out && !dev->has_crop_out) { v4l2_rect_set_size_to(crop, &r); r.height *= factor; v4l2_rect_set_size_to(compose, &r); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); } else if (!dev->has_compose_out) { v4l2_rect_map_inside(crop, &r); r.height /= factor; v4l2_rect_set_size_to(compose, &r); } else { r.height *= factor; v4l2_rect_set_max_size(compose, &r); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); crop->top *= factor; crop->height *= factor; v4l2_rect_set_size_to(crop, compose); v4l2_rect_map_inside(crop, &r); crop->top /= factor; crop->height /= factor; } } else { struct v4l2_rect r = { 0, 0, mp->width, mp->height }; v4l2_rect_set_size_to(crop, &r); r.height /= factor; v4l2_rect_set_size_to(compose, &r); } dev->fmt_out_rect.width = mp->width; dev->fmt_out_rect.height = mp->height; for (p = 0; p < mp->num_planes; p++) dev->bytesperline_out[p] = mp->plane_fmt[p].bytesperline; for (p = 
dev->fmt_out->buffers; p < dev->fmt_out->planes; p++) dev->bytesperline_out[p] = (dev->bytesperline_out[0] * dev->fmt_out->bit_depth[p]) / dev->fmt_out->bit_depth[0]; dev->field_out = mp->field; if (vivid_is_svid_out(dev)) dev->tv_field_out = mp->field; set_colorspace: dev->colorspace_out = mp->colorspace; dev->xfer_func_out = mp->xfer_func; dev->ycbcr_enc_out = mp->ycbcr_enc; dev->quantization_out = mp->quantization; if (dev->loop_video) { vivid_send_source_change(dev, SVID); vivid_send_source_change(dev, HDMI); } return 0; } int vidioc_g_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_g_fmt_vid_out(file, priv, f); } int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_try_fmt_vid_out(file, priv, f); } int vidioc_s_fmt_vid_out_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (!dev->multiplanar) return -ENOTTY; return vivid_s_fmt_vid_out(file, priv, f); } int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_out); } int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_out); } int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); if (dev->multiplanar) return -ENOTTY; return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_out); } int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel) { struct vivid_dev *dev = video_drvdata(file); if (!dev->has_crop_out && !dev->has_compose_out) return -ENOTTY; if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; sel->r.left = sel->r.top = 0; switch (sel->target) { case V4L2_SEL_TGT_CROP: if (!dev->has_crop_out) return -EINVAL; sel->r = dev->crop_out; break; case V4L2_SEL_TGT_CROP_DEFAULT: if (!dev->has_crop_out) return -EINVAL; sel->r = dev->fmt_out_rect; break; case V4L2_SEL_TGT_CROP_BOUNDS: if (!dev->has_crop_out) return -EINVAL; sel->r = vivid_max_rect; break; case V4L2_SEL_TGT_COMPOSE: if (!dev->has_compose_out) return -EINVAL; sel->r = dev->compose_out; break; case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: if (!dev->has_compose_out) return -EINVAL; sel->r = dev->sink_rect; break; default: return -EINVAL; } return 0; } int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_out; struct v4l2_rect *compose = &dev->compose_out; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_out) ? 
2 : 1; int ret; if (!dev->has_crop_out && !dev->has_compose_out) return -ENOTTY; if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_CROP: if (!dev->has_crop_out) return -EINVAL; ret = vivid_vid_adjust_sel(s->flags, &s->r); if (ret) return ret; v4l2_rect_set_min_size(&s->r, &vivid_min_rect); v4l2_rect_set_max_size(&s->r, &dev->fmt_out_rect); if (dev->has_scaler_out) { struct v4l2_rect max_rect = { 0, 0, dev->sink_rect.width * MAX_ZOOM, (dev->sink_rect.height / factor) * MAX_ZOOM }; v4l2_rect_set_max_size(&s->r, &max_rect); if (dev->has_compose_out) { struct v4l2_rect min_rect = { 0, 0, s->r.width / MAX_ZOOM, (s->r.height * factor) / MAX_ZOOM }; struct v4l2_rect max_rect = { 0, 0, s->r.width * MAX_ZOOM, (s->r.height * factor) * MAX_ZOOM }; v4l2_rect_set_min_size(compose, &min_rect); v4l2_rect_set_max_size(compose, &max_rect); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); } } else if (dev->has_compose_out) { s->r.top *= factor; s->r.height *= factor; v4l2_rect_set_max_size(&s->r, &dev->sink_rect); v4l2_rect_set_size_to(compose, &s->r); v4l2_rect_map_inside(compose, &dev->compose_bounds_out); s->r.top /= factor; s->r.height /= factor; } else { v4l2_rect_set_size_to(&s->r, &dev->sink_rect); s->r.height /= factor; } v4l2_rect_map_inside(&s->r, &dev->fmt_out_rect); *crop = s->r; break; case V4L2_SEL_TGT_COMPOSE: if (!dev->has_compose_out) return -EINVAL; ret = vivid_vid_adjust_sel(s->flags, &s->r); if (ret) return ret; v4l2_rect_set_min_size(&s->r, &vivid_min_rect); v4l2_rect_set_max_size(&s->r, &dev->sink_rect); v4l2_rect_map_inside(&s->r, &dev->compose_bounds_out); s->r.top /= factor; s->r.height /= factor; if (dev->has_scaler_out) { struct v4l2_rect fmt = dev->fmt_out_rect; struct v4l2_rect max_rect = { 0, 0, s->r.width * MAX_ZOOM, s->r.height * MAX_ZOOM }; struct v4l2_rect min_rect = { 0, 0, s->r.width / MAX_ZOOM, s->r.height / MAX_ZOOM }; v4l2_rect_set_min_size(&fmt, &min_rect); if (!dev->has_crop_out) v4l2_rect_set_max_size(&fmt, &max_rect); if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) && vb2_is_busy(&dev->vb_vid_out_q)) return -EBUSY; if (dev->has_crop_out) { v4l2_rect_set_min_size(crop, &min_rect); v4l2_rect_set_max_size(crop, &max_rect); } dev->fmt_out_rect = fmt; } else if (dev->has_crop_out) { struct v4l2_rect fmt = dev->fmt_out_rect; v4l2_rect_set_min_size(&fmt, &s->r); if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) && vb2_is_busy(&dev->vb_vid_out_q)) return -EBUSY; dev->fmt_out_rect = fmt; v4l2_rect_set_size_to(crop, &s->r); v4l2_rect_map_inside(crop, &dev->fmt_out_rect); } else { if (!v4l2_rect_same_size(&s->r, &dev->fmt_out_rect) && vb2_is_busy(&dev->vb_vid_out_q)) return -EBUSY; v4l2_rect_set_size_to(&dev->fmt_out_rect, &s->r); v4l2_rect_set_size_to(crop, &s->r); crop->height /= factor; v4l2_rect_map_inside(crop, &dev->fmt_out_rect); } s->r.top *= factor; s->r.height *= factor; *compose = s->r; break; default: return -EINVAL; } return 0; } int vivid_vid_out_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f) { struct vivid_dev *dev = video_drvdata(file); if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; switch (vivid_get_pixel_aspect(dev)) { case TPG_PIXEL_ASPECT_NTSC: f->numerator = 11; f->denominator = 10; break; case TPG_PIXEL_ASPECT_PAL: f->numerator = 54; f->denominator = 59; break; default: break; } return 0; } int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); const struct v4l2_rect *compose = 
&dev->compose_out; struct v4l2_window *win = &f->fmt.win; if (!dev->has_fb) return -EINVAL; win->w.top = dev->overlay_out_top; win->w.left = dev->overlay_out_left; win->w.width = compose->width; win->w.height = compose->height; win->field = V4L2_FIELD_ANY; win->chromakey = dev->chromakey_out; win->global_alpha = dev->global_alpha_out; return 0; } int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); const struct v4l2_rect *compose = &dev->compose_out; struct v4l2_window *win = &f->fmt.win; if (!dev->has_fb) return -EINVAL; win->w.left = clamp_t(int, win->w.left, -dev->display_width, dev->display_width); win->w.top = clamp_t(int, win->w.top, -dev->display_height, dev->display_height); win->w.width = compose->width; win->w.height = compose->height; /* * It makes no sense for an OSD to overlay only top or bottom fields, * so always set this to ANY. */ win->field = V4L2_FIELD_ANY; return 0; } int vidioc_s_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f) { struct vivid_dev *dev = video_drvdata(file); struct v4l2_window *win = &f->fmt.win; int ret = vidioc_try_fmt_vid_out_overlay(file, priv, f); if (ret) return ret; dev->overlay_out_top = win->w.top; dev->overlay_out_left = win->w.left; dev->chromakey_out = win->chromakey; dev->global_alpha_out = win->global_alpha; return ret; } int vivid_vid_out_overlay(struct file *file, void *fh, unsigned i) { struct vivid_dev *dev = video_drvdata(file); if (i && !dev->fmt_out->can_do_overlay) { dprintk(dev, 1, "unsupported output format for output overlay\n"); return -EINVAL; } dev->overlay_out_enabled = i; return 0; } int vivid_vid_out_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a) { struct vivid_dev *dev = video_drvdata(file); a->capability = V4L2_FBUF_CAP_EXTERNOVERLAY | V4L2_FBUF_CAP_CHROMAKEY | V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_GLOBAL_ALPHA | V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_LOCAL_INV_ALPHA; a->flags = V4L2_FBUF_FLAG_OVERLAY | dev->fbuf_out_flags; a->base = (void *)dev->video_pbase; a->fmt.width = dev->display_width; a->fmt.height = dev->display_height; if (dev->fb_defined.green.length == 5) a->fmt.pixelformat = V4L2_PIX_FMT_ARGB555; else a->fmt.pixelformat = V4L2_PIX_FMT_RGB565; a->fmt.bytesperline = dev->display_byte_stride; a->fmt.sizeimage = a->fmt.height * a->fmt.bytesperline; a->fmt.field = V4L2_FIELD_NONE; a->fmt.colorspace = V4L2_COLORSPACE_SRGB; a->fmt.priv = 0; return 0; } int vivid_vid_out_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a) { struct vivid_dev *dev = video_drvdata(file); const unsigned chroma_flags = V4L2_FBUF_FLAG_CHROMAKEY | V4L2_FBUF_FLAG_SRC_CHROMAKEY; const unsigned alpha_flags = V4L2_FBUF_FLAG_GLOBAL_ALPHA | V4L2_FBUF_FLAG_LOCAL_ALPHA | V4L2_FBUF_FLAG_LOCAL_INV_ALPHA; if ((a->flags & chroma_flags) == chroma_flags) return -EINVAL; switch (a->flags & alpha_flags) { case 0: case V4L2_FBUF_FLAG_GLOBAL_ALPHA: case V4L2_FBUF_FLAG_LOCAL_ALPHA: case V4L2_FBUF_FLAG_LOCAL_INV_ALPHA: break; default: return -EINVAL; } dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags); dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags); return 0; } static const struct v4l2_audioout vivid_audio_outputs[] = { { 0, "Line-Out 1" }, { 1, "Line-Out 2" }, }; int vidioc_enum_output(struct file *file, void *priv, struct v4l2_output *out) { struct vivid_dev *dev = video_drvdata(file); if (out->index >= dev->num_outputs) return -EINVAL; out->type = V4L2_OUTPUT_TYPE_ANALOG; switch 
(dev->output_type[out->index]) { case SVID: snprintf(out->name, sizeof(out->name), "S-Video %u", dev->output_name_counter[out->index]); out->std = V4L2_STD_ALL; if (dev->has_audio_outputs) out->audioset = (1 << ARRAY_SIZE(vivid_audio_outputs)) - 1; out->capabilities = V4L2_OUT_CAP_STD; break; case HDMI: snprintf(out->name, sizeof(out->name), "HDMI %u", dev->output_name_counter[out->index]); out->capabilities = V4L2_OUT_CAP_DV_TIMINGS; break; } return 0; } int vidioc_g_output(struct file *file, void *priv, unsigned *o) { struct vivid_dev *dev = video_drvdata(file); *o = dev->output; return 0; } int vidioc_s_output(struct file *file, void *priv, unsigned o) { struct vivid_dev *dev = video_drvdata(file); if (o >= dev->num_outputs) return -EINVAL; if (o == dev->output) return 0; if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q) || vb2_is_busy(&dev->vb_meta_out_q)) return -EBUSY; dev->output = o; dev->tv_audio_output = 0; if (dev->output_type[o] == SVID) dev->vid_out_dev.tvnorms = V4L2_STD_ALL; else dev->vid_out_dev.tvnorms = 0; dev->vbi_out_dev.tvnorms = dev->vid_out_dev.tvnorms; dev->meta_out_dev.tvnorms = dev->vid_out_dev.tvnorms; vivid_update_format_out(dev); v4l2_ctrl_activate(dev->ctrl_display_present, vivid_is_hdmi_out(dev)); if (vivid_is_hdmi_out(dev)) v4l2_ctrl_s_ctrl(dev->ctrl_display_present, dev->display_present[dev->output]); return 0; } int vidioc_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vout) { if (vout->index >= ARRAY_SIZE(vivid_audio_outputs)) return -EINVAL; *vout = vivid_audio_outputs[vout->index]; return 0; } int vidioc_g_audout(struct file *file, void *fh, struct v4l2_audioout *vout) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_svid_out(dev)) return -EINVAL; *vout = vivid_audio_outputs[dev->tv_audio_output]; return 0; } int vidioc_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_svid_out(dev)) return -EINVAL; if (vout->index >= ARRAY_SIZE(vivid_audio_outputs)) return -EINVAL; dev->tv_audio_output = vout->index; return 0; } int vivid_vid_out_s_std(struct file *file, void *priv, v4l2_std_id id) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_svid_out(dev)) return -ENODATA; if (dev->std_out == id) return 0; if (vb2_is_busy(&dev->vb_vid_out_q) || vb2_is_busy(&dev->vb_vbi_out_q)) return -EBUSY; dev->std_out = id; vivid_update_format_out(dev); return 0; } static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) { struct v4l2_bt_timings *bt = &timings->bt; if ((bt->standards & (V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF)) && v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap, NULL, NULL)) return true; return false; } int vivid_vid_out_s_dv_timings(struct file *file, void *_fh, struct v4l2_dv_timings *timings) { struct vivid_dev *dev = video_drvdata(file); if (!vivid_is_hdmi_out(dev)) return -ENODATA; if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap, 0, NULL, NULL) && !valid_cvt_gtf_timings(timings)) return -EINVAL; if (v4l2_match_dv_timings(timings, &dev->dv_timings_out, 0, true)) return 0; if (vb2_is_busy(&dev->vb_vid_out_q)) return -EBUSY; dev->dv_timings_out = *timings; vivid_update_format_out(dev); return 0; } int vivid_vid_out_g_parm(struct file *file, void *priv, struct v4l2_streamparm *parm) { struct vivid_dev *dev = video_drvdata(file); if (parm->type != (dev->multiplanar ? 
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT)) return -EINVAL; parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME; parm->parm.output.timeperframe = dev->timeperframe_vid_out; parm->parm.output.writebuffers = 1; return 0; } int vidioc_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_SOURCE_CHANGE: if (fh->vdev->vfl_dir == VFL_DIR_RX) return v4l2_src_change_event_subscribe(fh, sub); break; default: return v4l2_ctrl_subscribe_event(fh, sub); } return -EINVAL; } |
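/*
 * Editor's note: the format handlers above are what a plain VIDIOC_G_FMT /
 * VIDIOC_S_FMT round trip from user space ends up exercising on the vivid
 * output node. The sketch below drives the single-planar path; /dev/video2 is
 * only a guess for the vivid output device (pick whichever node reports
 * V4L2_CAP_VIDEO_OUTPUT), and the requested 1280x720 YUYV format is an
 * arbitrary example that the driver is free to adjust.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video2", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt)) {
		perror("VIDIOC_G_FMT");
		close(fd);
		return 1;
	}
	printf("current: %ux%u fourcc=%.4s\n",
	       fmt.fmt.pix.width, fmt.fmt.pix.height,
	       (char *)&fmt.fmt.pix.pixelformat);

	/* Ask for 1280x720 YUYV; the driver adjusts to what it supports. */
	fmt.fmt.pix.width = 1280;
	fmt.fmt.pix.height = 720;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt))
		perror("VIDIOC_S_FMT");
	else
		printf("granted: %ux%u bytesperline=%u sizeimage=%u\n",
		       fmt.fmt.pix.width, fmt.fmt.pix.height,
		       fmt.fmt.pix.bytesperline, fmt.fmt.pix.sizeimage);

	close(fd);
	return 0;
}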
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. * * The iopt_pages is the center of the storage and motion of PFNs. Each * iopt_pages represents a logical linear array of full PFNs. The array is 0 * based and has npages in it. Accessors use 'index' to refer to the entry in * this logical array, regardless of its storage location. * * PFNs are stored in a tiered scheme: * 1) iopt_pages::pinned_pfns xarray * 2) An iommu_domain * 3) The origin of the PFNs, i.e. the userspace pointer * * PFNs have to be copied between all combinations of tiers, depending on the * configuration. * * When a PFN is taken out of the userspace pointer it is pinned exactly once. * The storage locations of the PFN's index are tracked in the two interval * trees. If no interval includes the index then it is not pinned. * * If access_itree includes the PFN's index then an in-kernel access has * requested the page. The PFN is stored in the xarray so other requestors can * continue to find it. * * If the domains_itree includes the PFN's index then an iommu_domain is storing * the PFN and it can be read back using iommu_iova_to_phys(). To avoid * duplicating storage the xarray is not used if only iommu_domains are using * the PFN's index. * * As a general principle this is designed so that destroy never fails. This * means removing an iommu_domain or releasing an in-kernel access will not fail * due to insufficient memory.
In practice this means some cases have to hold * PFNs in the xarray even though they are also being stored in an iommu_domain. * * While the iopt_pages can use an iommu_domain as storage, it does not have an * IOVA itself. Instead the iopt_area represents a range of IOVA and uses the * iopt_pages as the PFN provider. Multiple iopt_areas can share the iopt_pages * and reference their own slice of the PFN array, with sub page granularity. * * In this file the term 'last' indicates an inclusive and closed interval, eg * [0,0] refers to a single PFN. 'end' means an open range, eg [0,0) refers to * no PFNs. * * Be cautious of overflow. An IOVA can go all the way up to U64_MAX, so * last_iova + 1 can overflow. An iopt_pages index will always be much less than * ULONG_MAX so last_index + 1 cannot overflow. */ #include <linux/overflow.h> #include <linux/slab.h> #include <linux/iommu.h> #include <linux/sched/mm.h> #include <linux/highmem.h> #include <linux/kthread.h> #include <linux/iommufd.h> #include "io_pagetable.h" #include "double_span.h" #ifndef CONFIG_IOMMUFD_TEST #define TEMP_MEMORY_LIMIT 65536 #else #define TEMP_MEMORY_LIMIT iommufd_test_memory_limit #endif #define BATCH_BACKUP_SIZE 32 /* * More memory makes pin_user_pages() and the batching more efficient, but as * this is only a performance optimization don't try too hard to get it. A 64k * allocation can hold about 26M of 4k pages and 13G of 2M pages in an * pfn_batch. Various destroy paths cannot fail and provide a small amount of * stack memory as a backup contingency. If backup_len is given this cannot * fail. */ static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len) { void *res; if (WARN_ON(*size == 0)) return NULL; if (*size < backup_len) return backup; if (!backup && iommufd_should_fail()) return NULL; *size = min_t(size_t, *size, TEMP_MEMORY_LIMIT); res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (res) return res; *size = PAGE_SIZE; if (backup_len) { res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (res) return res; *size = backup_len; return backup; } return kmalloc(*size, GFP_KERNEL); } void interval_tree_double_span_iter_update( struct interval_tree_double_span_iter *iter) { unsigned long last_hole = ULONG_MAX; unsigned int i; for (i = 0; i != ARRAY_SIZE(iter->spans); i++) { if (interval_tree_span_iter_done(&iter->spans[i])) { iter->is_used = -1; return; } if (iter->spans[i].is_hole) { last_hole = min(last_hole, iter->spans[i].last_hole); continue; } iter->is_used = i + 1; iter->start_used = iter->spans[i].start_used; iter->last_used = min(iter->spans[i].last_used, last_hole); return; } iter->is_used = 0; iter->start_hole = iter->spans[0].start_hole; iter->last_hole = min(iter->spans[0].last_hole, iter->spans[1].last_hole); } void interval_tree_double_span_iter_first( struct interval_tree_double_span_iter *iter, struct rb_root_cached *itree1, struct rb_root_cached *itree2, unsigned long first_index, unsigned long last_index) { unsigned int i; iter->itrees[0] = itree1; iter->itrees[1] = itree2; for (i = 0; i != ARRAY_SIZE(iter->spans); i++) interval_tree_span_iter_first(&iter->spans[i], iter->itrees[i], first_index, last_index); interval_tree_double_span_iter_update(iter); } void interval_tree_double_span_iter_next( struct interval_tree_double_span_iter *iter) { unsigned int i; if (iter->is_used == -1 || iter->last_hole == iter->spans[0].last_index) { iter->is_used = -1; return; } for (i = 0; i != ARRAY_SIZE(iter->spans); i++) interval_tree_span_iter_advance( 
&iter->spans[i], iter->itrees[i], iter->last_hole + 1); interval_tree_double_span_iter_update(iter); } static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) { int rc; rc = check_add_overflow(pages->npinned, npages, &pages->npinned); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(rc || pages->npinned > pages->npages); } static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) { int rc; rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(rc || pages->npinned > pages->npages); } static void iopt_pages_err_unpin(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **page_list) { unsigned long npages = last_index - start_index + 1; unpin_user_pages(page_list, npages); iopt_pages_sub_npinned(pages, npages); } /* * index is the number of PAGE_SIZE units from the start of the area's * iopt_pages. If the iova is sub page-size then the area has an iova that * covers a portion of the first and last pages in the range. */ static unsigned long iopt_area_index_to_iova(struct iopt_area *area, unsigned long index) { if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(index < iopt_area_index(area) || index > iopt_area_last_index(area)); index -= iopt_area_index(area); if (index == 0) return iopt_area_iova(area); return iopt_area_iova(area) - area->page_offset + index * PAGE_SIZE; } static unsigned long iopt_area_index_to_iova_last(struct iopt_area *area, unsigned long index) { if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(index < iopt_area_index(area) || index > iopt_area_last_index(area)); if (index == iopt_area_last_index(area)) return iopt_area_last_iova(area); return iopt_area_iova(area) - area->page_offset + (index - iopt_area_index(area) + 1) * PAGE_SIZE - 1; } static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova, size_t size) { size_t ret; ret = iommu_unmap(domain, iova, size); /* * It is a logic error in this code or a driver bug if the IOMMU unmaps * something other than exactly as requested. This implies that the * iommu driver may not fail unmap for reasons beyond bad agruments. * Particularly, the iommu driver may not do a memory allocation on the * unmap path. */ WARN_ON(ret != size); } static void iopt_area_unmap_domain_range(struct iopt_area *area, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index) { unsigned long start_iova = iopt_area_index_to_iova(area, start_index); iommu_unmap_nofail(domain, start_iova, iopt_area_index_to_iova_last(area, last_index) - start_iova + 1); } static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages, unsigned long index) { struct interval_tree_node *node; node = interval_tree_iter_first(&pages->domains_itree, index, index); if (!node) return NULL; return container_of(node, struct iopt_area, pages_node); } /* * A simple datastructure to hold a vector of PFNs, optimized for contiguous * PFNs. This is used as a temporary holding memory for shuttling pfns from one * place to another. Generally everything is made more efficient if operations * work on the largest possible grouping of pfns. 
eg fewer lock/unlock cycles, * better cache locality, etc */ struct pfn_batch { unsigned long *pfns; u32 *npfns; unsigned int array_size; unsigned int end; unsigned int total_pfns; }; static void batch_clear(struct pfn_batch *batch) { batch->total_pfns = 0; batch->end = 0; batch->pfns[0] = 0; batch->npfns[0] = 0; } /* * Carry means we carry a portion of the final hugepage over to the front of the * batch */ static void batch_clear_carry(struct pfn_batch *batch, unsigned int keep_pfns) { if (!keep_pfns) return batch_clear(batch); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(!batch->end || batch->npfns[batch->end - 1] < keep_pfns); batch->total_pfns = keep_pfns; batch->pfns[0] = batch->pfns[batch->end - 1] + (batch->npfns[batch->end - 1] - keep_pfns); batch->npfns[0] = keep_pfns; batch->end = 1; } static void batch_skip_carry(struct pfn_batch *batch, unsigned int skip_pfns) { if (!batch->total_pfns) return; if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(batch->total_pfns != batch->npfns[0]); skip_pfns = min(batch->total_pfns, skip_pfns); batch->pfns[0] += skip_pfns; batch->npfns[0] -= skip_pfns; batch->total_pfns -= skip_pfns; } static int __batch_init(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) { const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns); size_t size = max_pages * elmsz; batch->pfns = temp_kmalloc(&size, backup, backup_len); if (!batch->pfns) return -ENOMEM; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(size < elmsz)) return -EINVAL; batch->array_size = size / elmsz; batch->npfns = (u32 *)(batch->pfns + batch->array_size); batch_clear(batch); return 0; } static int batch_init(struct pfn_batch *batch, size_t max_pages) { return __batch_init(batch, max_pages, NULL, 0); } static void batch_init_backup(struct pfn_batch *batch, size_t max_pages, void *backup, size_t backup_len) { __batch_init(batch, max_pages, backup, backup_len); } static void batch_destroy(struct pfn_batch *batch, void *backup) { if (batch->pfns != backup) kfree(batch->pfns); } /* true if the pfn was added, false otherwise */ static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn) { const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns)); if (batch->end && pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1] && batch->npfns[batch->end - 1] != MAX_NPFNS) { batch->npfns[batch->end - 1]++; batch->total_pfns++; return true; } if (batch->end == batch->array_size) return false; batch->total_pfns++; batch->pfns[batch->end] = pfn; batch->npfns[batch->end] = 1; batch->end++; return true; } /* * Fill the batch with pfns from the domain. When the batch is full, or it * reaches last_index, the function will return. The caller should use * batch->total_pfns to determine the starting point for the next iteration. */ static void batch_from_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) { unsigned int page_offset = 0; unsigned long iova; phys_addr_t phys; iova = iopt_area_index_to_iova(area, start_index); if (start_index == iopt_area_index(area)) page_offset = area->page_offset; while (start_index <= last_index) { /* * This is pretty slow, it would be nice to get the page size * back from the driver, or have the driver directly fill the * batch. 
*/ phys = iommu_iova_to_phys(domain, iova) - page_offset; if (!batch_add_pfn(batch, PHYS_PFN(phys))) return; iova += PAGE_SIZE - page_offset; page_offset = 0; start_index++; } } static struct page **raw_pages_from_domain(struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages) { unsigned int page_offset = 0; unsigned long iova; phys_addr_t phys; iova = iopt_area_index_to_iova(area, start_index); if (start_index == iopt_area_index(area)) page_offset = area->page_offset; while (start_index <= last_index) { phys = iommu_iova_to_phys(domain, iova) - page_offset; *(out_pages++) = pfn_to_page(PHYS_PFN(phys)); iova += PAGE_SIZE - page_offset; page_offset = 0; start_index++; } return out_pages; } /* Continues reading a domain until we reach a discontinuity in the pfns. */ static void batch_from_domain_continue(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index, unsigned long last_index) { unsigned int array_size = batch->array_size; batch->array_size = batch->end; batch_from_domain(batch, domain, area, start_index, last_index); batch->array_size = array_size; } /* * This is part of the VFIO compatibility support for VFIO_TYPE1_IOMMU. That * mode permits splitting a mapped area up, and then one of the splits is * unmapped. Doing this normally would cause us to violate our invariant of * pairing map/unmap. Thus, to support old VFIO compatibility disable support * for batching consecutive PFNs. All PFNs mapped into the iommu are done in * PAGE_SIZE units, not larger or smaller. */ static int batch_iommu_map_small(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { unsigned long start_iova = iova; int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(paddr % PAGE_SIZE || iova % PAGE_SIZE || size % PAGE_SIZE); while (size) { rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot, GFP_KERNEL_ACCOUNT); if (rc) goto err_unmap; iova += PAGE_SIZE; paddr += PAGE_SIZE; size -= PAGE_SIZE; } return 0; err_unmap: if (start_iova != iova) iommu_unmap_nofail(domain, start_iova, iova - start_iova); return rc; } static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain, struct iopt_area *area, unsigned long start_index) { bool disable_large_pages = area->iopt->disable_large_pages; unsigned long last_iova = iopt_area_last_iova(area); unsigned int page_offset = 0; unsigned long start_iova; unsigned long next_iova; unsigned int cur = 0; unsigned long iova; int rc; /* The first index might be a partial page */ if (start_index == iopt_area_index(area)) page_offset = area->page_offset; next_iova = iova = start_iova = iopt_area_index_to_iova(area, start_index); while (cur < batch->end) { next_iova = min(last_iova + 1, next_iova + batch->npfns[cur] * PAGE_SIZE - page_offset); if (disable_large_pages) rc = batch_iommu_map_small( domain, iova, PFN_PHYS(batch->pfns[cur]) + page_offset, next_iova - iova, area->iommu_prot); else rc = iommu_map(domain, iova, PFN_PHYS(batch->pfns[cur]) + page_offset, next_iova - iova, area->iommu_prot, GFP_KERNEL_ACCOUNT); if (rc) goto err_unmap; iova = next_iova; page_offset = 0; cur++; } return 0; err_unmap: if (start_iova != iova) iommu_unmap_nofail(domain, start_iova, iova - start_iova); return rc; } static void batch_from_xarray(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; rcu_read_lock(); while (true) { entry = 
xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); if (!batch_add_pfn(batch, xa_to_value(entry)) || start_index == last_index) break; start_index++; } rcu_read_unlock(); } static void batch_from_xarray_clear(struct pfn_batch *batch, struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; xas_lock(&xas); while (true) { entry = xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); if (!batch_add_pfn(batch, xa_to_value(entry))) break; xas_store(&xas, NULL); if (start_index == last_index) break; start_index++; } xas_unlock(&xas); } static void clear_xarray(struct xarray *xa, unsigned long start_index, unsigned long last_index) { XA_STATE(xas, xa, start_index); void *entry; xas_lock(&xas); xas_for_each(&xas, entry, last_index) xas_store(&xas, NULL); xas_unlock(&xas); } static int pages_to_xarray(struct xarray *xa, unsigned long start_index, unsigned long last_index, struct page **pages) { struct page **end_pages = pages + (last_index - start_index) + 1; struct page **half_pages = pages + (end_pages - pages) / 2; XA_STATE(xas, xa, start_index); do { void *old; xas_lock(&xas); while (pages != end_pages) { /* xarray does not participate in fault injection */ if (pages == half_pages && iommufd_should_fail()) { xas_set_err(&xas, -EINVAL); xas_unlock(&xas); /* aka xas_destroy() */ xas_nomem(&xas, GFP_KERNEL); goto err_clear; } old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages))); if (xas_error(&xas)) break; WARN_ON(old); pages++; xas_next(&xas); } xas_unlock(&xas); } while (xas_nomem(&xas, GFP_KERNEL)); err_clear: if (xas_error(&xas)) { if (xas.xa_index != start_index) clear_xarray(xa, start_index, xas.xa_index - 1); return xas_error(&xas); } return 0; } static void batch_from_pages(struct pfn_batch *batch, struct page **pages, size_t npages) { struct page **end = pages + npages; for (; pages != end; pages++) if (!batch_add_pfn(batch, page_to_pfn(*pages))) break; } static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages) { unsigned int cur = 0; while (first_page_off) { if (batch->npfns[cur] > first_page_off) break; first_page_off -= batch->npfns[cur]; cur++; } while (npages) { size_t to_unpin = min_t(size_t, npages, batch->npfns[cur] - first_page_off); unpin_user_page_range_dirty_lock( pfn_to_page(batch->pfns[cur] + first_page_off), to_unpin, pages->writable); iopt_pages_sub_npinned(pages, to_unpin); cur++; first_page_off = 0; npages -= to_unpin; } } static void copy_data_page(struct page *page, void *data, unsigned long offset, size_t length, unsigned int flags) { void *mem; mem = kmap_local_page(page); if (flags & IOMMUFD_ACCESS_RW_WRITE) { memcpy(mem + offset, data, length); set_page_dirty_lock(page); } else { memcpy(data, mem + offset, length); } kunmap_local(mem); } static unsigned long batch_rw(struct pfn_batch *batch, void *data, unsigned long offset, unsigned long length, unsigned int flags) { unsigned long copied = 0; unsigned int npage = 0; unsigned int cur = 0; while (cur < batch->end) { unsigned long bytes = min(length, PAGE_SIZE - offset); copy_data_page(pfn_to_page(batch->pfns[cur] + npage), data, offset, bytes, flags); offset = 0; length -= bytes; data += bytes; copied += bytes; npage++; if (npage == batch->npfns[cur]) { npage = 0; cur++; } if (!length) break; } return copied; } /* pfn_reader_user is just the pin_user_pages() path */ struct pfn_reader_user { struct page **upages; size_t upages_len; 
unsigned long upages_start; unsigned long upages_end; unsigned int gup_flags; /* * 1 means mmget() and mmap_read_lock(), 0 means only mmget(), -1 is * neither */ int locked; }; static void pfn_reader_user_init(struct pfn_reader_user *user, struct iopt_pages *pages) { user->upages = NULL; user->upages_start = 0; user->upages_end = 0; user->locked = -1; user->gup_flags = FOLL_LONGTERM; if (pages->writable) user->gup_flags |= FOLL_WRITE; } static void pfn_reader_user_destroy(struct pfn_reader_user *user, struct iopt_pages *pages) { if (user->locked != -1) { if (user->locked) mmap_read_unlock(pages->source_mm); if (pages->source_mm != current->mm) mmput(pages->source_mm); user->locked = -1; } kfree(user->upages); user->upages = NULL; } static int pfn_reader_user_pin(struct pfn_reader_user *user, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { bool remote_mm = pages->source_mm != current->mm; unsigned long npages; uintptr_t uptr; long rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(last_index < start_index)) return -EINVAL; if (!user->upages) { /* All undone in pfn_reader_destroy() */ user->upages_len = (last_index - start_index + 1) * sizeof(*user->upages); user->upages = temp_kmalloc(&user->upages_len, NULL, 0); if (!user->upages) return -ENOMEM; } if (user->locked == -1) { /* * The majority of usages will run the map task within the mm * providing the pages, so we can optimize into * get_user_pages_fast() */ if (remote_mm) { if (!mmget_not_zero(pages->source_mm)) return -EFAULT; } user->locked = 0; } npages = min_t(unsigned long, last_index - start_index + 1, user->upages_len / sizeof(*user->upages)); if (iommufd_should_fail()) return -EFAULT; uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE); if (!remote_mm) rc = pin_user_pages_fast(uptr, npages, user->gup_flags, user->upages); else { if (!user->locked) { mmap_read_lock(pages->source_mm); user->locked = 1; } rc = pin_user_pages_remote(pages->source_mm, uptr, npages, user->gup_flags, user->upages, &user->locked); } if (rc <= 0) { if (WARN_ON(!rc)) return -EFAULT; return rc; } iopt_pages_add_npinned(pages, rc); user->upages_start = start_index; user->upages_end = start_index + rc; return 0; } /* This is the "modern" and faster accounting method used by io_uring */ static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) { unsigned long lock_limit; unsigned long cur_pages; unsigned long new_pages; lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; do { cur_pages = atomic_long_read(&pages->source_user->locked_vm); new_pages = cur_pages + npages; if (new_pages > lock_limit) return -ENOMEM; } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages, new_pages) != cur_pages); return 0; } static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages) { if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages)) return; atomic_long_sub(npages, &pages->source_user->locked_vm); } /* This is the accounting method used for compatibility with VFIO */ static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, bool inc, struct pfn_reader_user *user) { bool do_put = false; int rc; if (user && user->locked) { mmap_read_unlock(pages->source_mm); user->locked = 0; /* If we had the lock then we also have a get */ } else if ((!user || !user->upages) && pages->source_mm != current->mm) { if (!mmget_not_zero(pages->source_mm)) return -EINVAL; do_put = true; } mmap_write_lock(pages->source_mm); rc = 
__account_locked_vm(pages->source_mm, npages, inc, pages->source_task, false); mmap_write_unlock(pages->source_mm); if (do_put) mmput(pages->source_mm); return rc; } static int do_update_pinned(struct iopt_pages *pages, unsigned long npages, bool inc, struct pfn_reader_user *user) { int rc = 0; switch (pages->account_mode) { case IOPT_PAGES_ACCOUNT_NONE: break; case IOPT_PAGES_ACCOUNT_USER: if (inc) rc = incr_user_locked_vm(pages, npages); else decr_user_locked_vm(pages, npages); break; case IOPT_PAGES_ACCOUNT_MM: rc = update_mm_locked_vm(pages, npages, inc, user); break; } if (rc) return rc; pages->last_npinned = pages->npinned; if (inc) atomic64_add(npages, &pages->source_mm->pinned_vm); else atomic64_sub(npages, &pages->source_mm->pinned_vm); return 0; } static void update_unpinned(struct iopt_pages *pages) { if (WARN_ON(pages->npinned > pages->last_npinned)) return; if (pages->npinned == pages->last_npinned) return; do_update_pinned(pages, pages->last_npinned - pages->npinned, false, NULL); } /* * Changes in the number of pages pinned is done after the pages have been read * and processed. If the user lacked the limit then the error unwind will unpin * everything that was just pinned. This is because it is expensive to calculate * how many pages we have already pinned within a range to generate an accurate * prediction in advance of doing the work to actually pin them. */ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user, struct iopt_pages *pages) { unsigned long npages; bool inc; lockdep_assert_held(&pages->mutex); if (pages->npinned == pages->last_npinned) return 0; if (pages->npinned < pages->last_npinned) { npages = pages->last_npinned - pages->npinned; inc = false; } else { if (iommufd_should_fail()) return -ENOMEM; npages = pages->npinned - pages->last_npinned; inc = true; } return do_update_pinned(pages, npages, inc, user); } /* * PFNs are stored in three places, in order of preference: * - The iopt_pages xarray. This is only populated if there is a * iopt_pages_access * - The iommu_domain under an area * - The original PFN source, ie pages->source_mm * * This iterator reads the pfns optimizing to load according to the * above order. */ struct pfn_reader { struct iopt_pages *pages; struct interval_tree_double_span_iter span; struct pfn_batch batch; unsigned long batch_start_index; unsigned long batch_end_index; unsigned long last_index; struct pfn_reader_user user; }; static int pfn_reader_update_pinned(struct pfn_reader *pfns) { return pfn_reader_user_update_pinned(&pfns->user, pfns->pages); } /* * The batch can contain a mixture of pages that are still in use and pages that * need to be unpinned. Unpin only pages that are not held anywhere else. 
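 * * For example, if the batch covers page indexes 20 to 29 and an access or a domain still covers 24 to 25, only 20-23 and 26-29 are unpinned here; 24-25 keep their pin on behalf of that other user.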
*/ static void pfn_reader_unpin(struct pfn_reader *pfns) { unsigned long last = pfns->batch_end_index - 1; unsigned long start = pfns->batch_start_index; struct interval_tree_double_span_iter span; struct iopt_pages *pages = pfns->pages; lockdep_assert_held(&pages->mutex); interval_tree_for_each_double_span(&span, &pages->access_itree, &pages->domains_itree, start, last) { if (span.is_used) continue; batch_unpin(&pfns->batch, pages, span.start_hole - start, span.last_hole - span.start_hole + 1); } } /* Process a single span to load it from the proper storage */ static int pfn_reader_fill_span(struct pfn_reader *pfns) { struct interval_tree_double_span_iter *span = &pfns->span; unsigned long start_index = pfns->batch_end_index; struct iopt_area *area; int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(span->last_used < start_index)) return -EINVAL; if (span->is_used == 1) { batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns, start_index, span->last_used); return 0; } if (span->is_used == 2) { /* * Pull as many pages from the first domain we find in the * target span. If it is too small then we will be called again * and we'll find another area. */ area = iopt_pages_find_domain_area(pfns->pages, start_index); if (WARN_ON(!area)) return -EINVAL; /* The storage_domain cannot change without the pages mutex */ batch_from_domain( &pfns->batch, area->storage_domain, area, start_index, min(iopt_area_last_index(area), span->last_used)); return 0; } if (start_index >= pfns->user.upages_end) { rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index, span->last_hole); if (rc) return rc; } batch_from_pages(&pfns->batch, pfns->user.upages + (start_index - pfns->user.upages_start), pfns->user.upages_end - start_index); return 0; } static bool pfn_reader_done(struct pfn_reader *pfns) { return pfns->batch_start_index == pfns->last_index + 1; } static int pfn_reader_next(struct pfn_reader *pfns) { int rc; batch_clear(&pfns->batch); pfns->batch_start_index = pfns->batch_end_index; while (pfns->batch_end_index != pfns->last_index + 1) { unsigned int npfns = pfns->batch.total_pfns; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(interval_tree_double_span_iter_done(&pfns->span))) return -EINVAL; rc = pfn_reader_fill_span(pfns); if (rc) return rc; if (WARN_ON(!pfns->batch.total_pfns)) return -EINVAL; pfns->batch_end_index = pfns->batch_start_index + pfns->batch.total_pfns; if (pfns->batch_end_index == pfns->span.last_used + 1) interval_tree_double_span_iter_next(&pfns->span); /* Batch is full */ if (npfns == pfns->batch.total_pfns) return 0; } return 0; } static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { int rc; lockdep_assert_held(&pages->mutex); pfns->pages = pages; pfns->batch_start_index = start_index; pfns->batch_end_index = start_index; pfns->last_index = last_index; pfn_reader_user_init(&pfns->user, pages); rc = batch_init(&pfns->batch, last_index - start_index + 1); if (rc) return rc; interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree, &pages->domains_itree, start_index, last_index); return 0; } /* * There are many assertions regarding the state of pages->npinned vs * pages->last_pinned, for instance something like unmapping a domain must only * decrement the npinned, and pfn_reader_destroy() must be called only after all * the pins are updated. 
This is fine for success flows, but error flows * sometimes need to release the pins held inside the pfn_reader before going on * to complete unmapping and releasing pins held in domains. */ static void pfn_reader_release_pins(struct pfn_reader *pfns) { struct iopt_pages *pages = pfns->pages; if (pfns->user.upages_end > pfns->batch_end_index) { size_t npages = pfns->user.upages_end - pfns->batch_end_index; /* Any pages not transferred to the batch are just unpinned */ unpin_user_pages(pfns->user.upages + (pfns->batch_end_index - pfns->user.upages_start), npages); iopt_pages_sub_npinned(pages, npages); pfns->user.upages_end = pfns->batch_end_index; } if (pfns->batch_start_index != pfns->batch_end_index) { pfn_reader_unpin(pfns); pfns->batch_start_index = pfns->batch_end_index; } } static void pfn_reader_destroy(struct pfn_reader *pfns) { struct iopt_pages *pages = pfns->pages; pfn_reader_release_pins(pfns); pfn_reader_user_destroy(&pfns->user, pfns->pages); batch_destroy(&pfns->batch, NULL); WARN_ON(pages->last_npinned != pages->npinned); } static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { int rc; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && WARN_ON(last_index < start_index)) return -EINVAL; rc = pfn_reader_init(pfns, pages, start_index, last_index); if (rc) return rc; rc = pfn_reader_next(pfns); if (rc) { pfn_reader_destroy(pfns); return rc; } return 0; } struct iopt_pages *iopt_alloc_pages(void __user *uptr, unsigned long length, bool writable) { struct iopt_pages *pages; unsigned long end; /* * The iommu API uses size_t as the length, and protect the DIV_ROUND_UP * below from overflow */ if (length > SIZE_MAX - PAGE_SIZE || length == 0) return ERR_PTR(-EINVAL); if (check_add_overflow((unsigned long)uptr, length, &end)) return ERR_PTR(-EOVERFLOW); pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT); if (!pages) return ERR_PTR(-ENOMEM); kref_init(&pages->kref); xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT); mutex_init(&pages->mutex); pages->source_mm = current->mm; mmgrab(pages->source_mm); pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE); pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE); pages->access_itree = RB_ROOT_CACHED; pages->domains_itree = RB_ROOT_CACHED; pages->writable = writable; if (capable(CAP_IPC_LOCK)) pages->account_mode = IOPT_PAGES_ACCOUNT_NONE; else pages->account_mode = IOPT_PAGES_ACCOUNT_USER; pages->source_task = current->group_leader; get_task_struct(current->group_leader); pages->source_user = get_uid(current_user()); return pages; } void iopt_release_pages(struct kref *kref) { struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref); WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root)); WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root)); WARN_ON(pages->npinned); WARN_ON(!xa_empty(&pages->pinned_pfns)); mmdrop(pages->source_mm); mutex_destroy(&pages->mutex); put_task_struct(pages->source_task); free_uid(pages->source_user); kfree(pages); } static void iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index) { while (start_index <= last_index) { unsigned long batch_last_index; if (*unmapped_end_index <= last_index) { unsigned long start = max(start_index, *unmapped_end_index); if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && batch->total_pfns) 
WARN_ON(*unmapped_end_index - batch->total_pfns != start_index); batch_from_domain(batch, domain, area, start, last_index); batch_last_index = start_index + batch->total_pfns - 1; } else { batch_last_index = last_index; } if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(batch_last_index > real_last_index); /* * unmaps must always 'cut' at a place where the pfns are not * contiguous to pair with the maps that always install * contiguous pages. Thus, if we have to stop unpinning in the * middle of the domains we need to keep reading pfns until we * find a cut point to do the unmap. The pfns we read are * carried over and either skipped or integrated into the next * batch. */ if (batch_last_index == last_index && last_index != real_last_index) batch_from_domain_continue(batch, domain, area, last_index + 1, real_last_index); if (*unmapped_end_index <= batch_last_index) { iopt_area_unmap_domain_range( area, domain, *unmapped_end_index, start_index + batch->total_pfns - 1); *unmapped_end_index = start_index + batch->total_pfns; } /* unpin must follow unmap */ batch_unpin(batch, pages, 0, batch_last_index - start_index + 1); start_index = batch_last_index + 1; batch_clear_carry(batch, *unmapped_end_index - batch_last_index - 1); } } static void __iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long last_index) { struct interval_tree_double_span_iter span; unsigned long start_index = iopt_area_index(area); unsigned long unmapped_end_index = start_index; u64 backup[BATCH_BACKUP_SIZE]; struct pfn_batch batch; lockdep_assert_held(&pages->mutex); /* * For security we must not unpin something that is still DMA mapped, * so this must unmap any IOVA before we go ahead and unpin the pages. * This creates a complexity where we need to skip over unpinning pages * held in the xarray, but continue to unmap from the domain. * * The domain unmap cannot stop in the middle of a contiguous range of * PFNs. To solve this problem the unpinning step will read ahead to the * end of any contiguous span, unmap that whole span, and then only * unpin the leading part that does not have any accesses. The residual * PFNs that were unmapped but not unpinned are called a "carry" in the * batch as they are moved to the front of the PFN list and continue on * to the next iteration(s). */ batch_init_backup(&batch, last_index + 1, backup, sizeof(backup)); interval_tree_for_each_double_span(&span, &pages->domains_itree, &pages->access_itree, start_index, last_index) { if (span.is_used) { batch_skip_carry(&batch, span.last_used - span.start_used + 1); continue; } iopt_area_unpin_domain(&batch, area, pages, domain, span.start_hole, span.last_hole, &unmapped_end_index, last_index); } /* * If the range ends in a access then we do the residual unmap without * any unpins. */ if (unmapped_end_index != last_index + 1) iopt_area_unmap_domain_range(area, domain, unmapped_end_index, last_index); WARN_ON(batch.total_pfns); batch_destroy(&batch, backup); update_unpinned(pages); } static void iopt_area_unfill_partial_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long end_index) { if (end_index != iopt_area_index(area)) __iopt_area_unfill_domain(area, pages, domain, end_index - 1); } /** * iopt_area_unmap_domain() - Unmap without unpinning PFNs in a domain * @area: The IOVA range to unmap * @domain: The domain to unmap * * The caller must know that unpinning is not required, usually because there * are other domains in the iopt. 
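 * * A typical case is removing one of several domains attached to the io_pagetable: the PFNs must stay pinned for the remaining domains, so only the departing domain's IOVA mappings are torn down.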
*/ void iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain) { iommu_unmap_nofail(domain, iopt_area_iova(area), iopt_area_length(area)); } /** * iopt_area_unfill_domain() - Unmap and unpin PFNs in a domain * @area: IOVA area to use * @pages: page supplier for the area (area->pages is NULL) * @domain: Domain to unmap from * * The domain should be removed from the domains_itree before calling. The * domain will always be unmapped, but the PFNs may not be unpinned if there are * still accesses. */ void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain) { __iopt_area_unfill_domain(area, pages, domain, iopt_area_last_index(area)); } /** * iopt_area_fill_domain() - Map PFNs from the area into a domain * @area: IOVA area to use * @domain: Domain to load PFNs into * * Read the pfns from the area's underlying iopt_pages and map them into the * given domain. Called when attaching a new domain to an io_pagetable. */ int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain) { unsigned long done_end_index; struct pfn_reader pfns; int rc; lockdep_assert_held(&area->pages->mutex); rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area), iopt_area_last_index(area)); if (rc) return rc; while (!pfn_reader_done(&pfns)) { done_end_index = pfns.batch_start_index; rc = batch_to_domain(&pfns.batch, domain, area, pfns.batch_start_index); if (rc) goto out_unmap; done_end_index = pfns.batch_end_index; rc = pfn_reader_next(&pfns); if (rc) goto out_unmap; } rc = pfn_reader_update_pinned(&pfns); if (rc) goto out_unmap; goto out_destroy; out_unmap: pfn_reader_release_pins(&pfns); iopt_area_unfill_partial_domain(area, area->pages, domain, done_end_index); out_destroy: pfn_reader_destroy(&pfns); return rc; } /** * iopt_area_fill_domains() - Install PFNs into the area's domains * @area: The area to act on * @pages: The pages associated with the area (area->pages is NULL) * * Called during area creation. The area is freshly created and not inserted in * the domains_itree yet. PFNs are read and loaded into every domain held in the * area's io_pagetable and the area is installed in the domains_itree. * * On failure all domains are left unchanged. 
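 * * For example, if mapping into one of the domains fails partway, every mapping this call already installed is removed from all of the domains and the pins taken so far are released before the error is returned.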
*/ int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages) { unsigned long done_first_end_index; unsigned long done_all_end_index; struct iommu_domain *domain; unsigned long unmap_index; struct pfn_reader pfns; unsigned long index; int rc; lockdep_assert_held(&area->iopt->domains_rwsem); if (xa_empty(&area->iopt->domains)) return 0; mutex_lock(&pages->mutex); rc = pfn_reader_first(&pfns, pages, iopt_area_index(area), iopt_area_last_index(area)); if (rc) goto out_unlock; while (!pfn_reader_done(&pfns)) { done_first_end_index = pfns.batch_end_index; done_all_end_index = pfns.batch_start_index; xa_for_each(&area->iopt->domains, index, domain) { rc = batch_to_domain(&pfns.batch, domain, area, pfns.batch_start_index); if (rc) goto out_unmap; } done_all_end_index = done_first_end_index; rc = pfn_reader_next(&pfns); if (rc) goto out_unmap; } rc = pfn_reader_update_pinned(&pfns); if (rc) goto out_unmap; area->storage_domain = xa_load(&area->iopt->domains, 0); interval_tree_insert(&area->pages_node, &pages->domains_itree); goto out_destroy; out_unmap: pfn_reader_release_pins(&pfns); xa_for_each(&area->iopt->domains, unmap_index, domain) { unsigned long end_index; if (unmap_index < index) end_index = done_first_end_index; else end_index = done_all_end_index; /* * The area is not yet part of the domains_itree so we have to * manage the unpinning specially. The last domain does the * unpin, every other domain is just unmapped. */ if (unmap_index != area->iopt->next_domain_id - 1) { if (end_index != iopt_area_index(area)) iopt_area_unmap_domain_range( area, domain, iopt_area_index(area), end_index - 1); } else { iopt_area_unfill_partial_domain(area, pages, domain, end_index); } } out_destroy: pfn_reader_destroy(&pfns); out_unlock: mutex_unlock(&pages->mutex); return rc; } /** * iopt_area_unfill_domains() - unmap PFNs from the area's domains * @area: The area to act on * @pages: The pages associated with the area (area->pages is NULL) * * Called during area destruction. This unmaps the iova's covered by all the * area's domains and releases the PFNs. */ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages) { struct io_pagetable *iopt = area->iopt; struct iommu_domain *domain; unsigned long index; lockdep_assert_held(&iopt->domains_rwsem); mutex_lock(&pages->mutex); if (!area->storage_domain) goto out_unlock; xa_for_each(&iopt->domains, index, domain) if (domain != area->storage_domain) iopt_area_unmap_domain_range( area, domain, iopt_area_index(area), iopt_area_last_index(area)); if (IS_ENABLED(CONFIG_IOMMUFD_TEST)) WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb)); interval_tree_remove(&area->pages_node, &pages->domains_itree); iopt_area_unfill_domain(area, pages, area->storage_domain); area->storage_domain = NULL; out_unlock: mutex_unlock(&pages->mutex); } static void iopt_pages_unpin_xarray(struct pfn_batch *batch, struct iopt_pages *pages, unsigned long start_index, unsigned long end_index) { while (start_index <= end_index) { batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index, end_index); batch_unpin(batch, pages, 0, batch->total_pfns); start_index += batch->total_pfns; batch_clear(batch); } } /** * iopt_pages_unfill_xarray() - Update the xarry after removing an access * @pages: The pages to act on * @start_index: Starting PFN index * @last_index: Last PFN index * * Called when an iopt_pages_access is removed, removes pages from the itree. * The access should already be removed from the access_itree. 
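 * * Pages that are still mapped into a domain only drop their xarray entry and keep their pin; pages that were pinned solely for the access are unpinned here.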
*/ void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index) { struct interval_tree_double_span_iter span; u64 backup[BATCH_BACKUP_SIZE]; struct pfn_batch batch; bool batch_inited = false; lockdep_assert_held(&pages->mutex); interval_tree_for_each_double_span(&span, &pages->access_itree, &pages->domains_itree, start_index, last_index) { if (!span.is_used) { if (!batch_inited) { batch_init_backup(&batch, last_index - start_index + 1, backup, sizeof(backup)); batch_inited = true; } iopt_pages_unpin_xarray(&batch, pages, span.start_hole, span.last_hole); } else if (span.is_used == 2) { /* Covered by a domain */ clear_xarray(&pages->pinned_pfns, span.start_used, span.last_used); } /* Otherwise covered by an existing access */ } if (batch_inited) batch_destroy(&batch, backup); update_unpinned(pages); } /** * iopt_pages_fill_from_xarray() - Fast path for reading PFNs * @pages: The pages to act on * @start_index: The first page index in the range * @last_index: The last page index in the range * @out_pages: The output array to return the pages * * This can be called if the caller is holding a refcount on an * iopt_pages_access that is known to have already been filled. It quickly reads * the pages directly from the xarray. * * This is part of the SW iommu interface to read pages for in-kernel use. */ void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { XA_STATE(xas, &pages->pinned_pfns, start_index); void *entry; rcu_read_lock(); while (start_index <= last_index) { entry = xas_next(&xas); if (xas_retry(&xas, entry)) continue; WARN_ON(!xa_is_value(entry)); *(out_pages++) = pfn_to_page(xa_to_value(entry)); start_index++; } rcu_read_unlock(); } static int iopt_pages_fill_from_domain(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { while (start_index != last_index + 1) { unsigned long domain_last; struct iopt_area *area; area = iopt_pages_find_domain_area(pages, start_index); if (WARN_ON(!area)) return -EINVAL; domain_last = min(iopt_area_last_index(area), last_index); out_pages = raw_pages_from_domain(area->storage_domain, area, start_index, domain_last, out_pages); start_index = domain_last + 1; } return 0; } static int iopt_pages_fill_from_mm(struct iopt_pages *pages, struct pfn_reader_user *user, unsigned long start_index, unsigned long last_index, struct page **out_pages) { unsigned long cur_index = start_index; int rc; while (cur_index != last_index + 1) { user->upages = out_pages + (cur_index - start_index); rc = pfn_reader_user_pin(user, pages, cur_index, last_index); if (rc) goto out_unpin; cur_index = user->upages_end; } return 0; out_unpin: if (start_index != cur_index) iopt_pages_err_unpin(pages, start_index, cur_index - 1, out_pages); return rc; } /** * iopt_pages_fill_xarray() - Read PFNs * @pages: The pages to act on * @start_index: The first page index in the range * @last_index: The last page index in the range * @out_pages: The output array to return the pages, may be NULL * * This populates the xarray and returns the pages in out_pages. As the slow * path this is able to copy pages from other storage tiers into the xarray. * * On failure the xarray is left unchanged. * * This is part of the SW iommu interface to read pages for in-kernel use. 
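 * * A caller that populated a range this way is expected to drop it again with iopt_pages_unfill_xarray() over the same range once the in-kernel access is removed.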
*/ int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages) { struct interval_tree_double_span_iter span; unsigned long xa_end = start_index; struct pfn_reader_user user; int rc; lockdep_assert_held(&pages->mutex); pfn_reader_user_init(&user, pages); user.upages_len = (last_index - start_index + 1) * sizeof(*out_pages); interval_tree_for_each_double_span(&span, &pages->access_itree, &pages->domains_itree, start_index, last_index) { struct page **cur_pages; if (span.is_used == 1) { cur_pages = out_pages + (span.start_used - start_index); iopt_pages_fill_from_xarray(pages, span.start_used, span.last_used, cur_pages); continue; } if (span.is_used == 2) { cur_pages = out_pages + (span.start_used - start_index); iopt_pages_fill_from_domain(pages, span.start_used, span.last_used, cur_pages); rc = pages_to_xarray(&pages->pinned_pfns, span.start_used, span.last_used, cur_pages); if (rc) goto out_clean_xa; xa_end = span.last_used + 1; continue; } /* hole */ cur_pages = out_pages + (span.start_hole - start_index); rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole, span.last_hole, cur_pages); if (rc) goto out_clean_xa; rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole, span.last_hole, cur_pages); if (rc) { iopt_pages_err_unpin(pages, span.start_hole, span.last_hole, cur_pages); goto out_clean_xa; } xa_end = span.last_hole + 1; } rc = pfn_reader_user_update_pinned(&user, pages); if (rc) goto out_clean_xa; user.upages = NULL; pfn_reader_user_destroy(&user, pages); return 0; out_clean_xa: if (start_index != xa_end) iopt_pages_unfill_xarray(pages, start_index, xa_end - 1); user.upages = NULL; pfn_reader_user_destroy(&user, pages); return rc; } /* * This uses the pfn_reader instead of taking a shortcut by using the mm. It can * do every scenario and is fully consistent with what an iommu_domain would * see. */ static int iopt_pages_rw_slow(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, unsigned long offset, void *data, unsigned long length, unsigned int flags) { struct pfn_reader pfns; int rc; mutex_lock(&pages->mutex); rc = pfn_reader_first(&pfns, pages, start_index, last_index); if (rc) goto out_unlock; while (!pfn_reader_done(&pfns)) { unsigned long done; done = batch_rw(&pfns.batch, data, offset, length, flags); data += done; length -= done; offset = 0; pfn_reader_unpin(&pfns); rc = pfn_reader_next(&pfns); if (rc) goto out_destroy; } if (WARN_ON(length != 0)) rc = -EINVAL; out_destroy: pfn_reader_destroy(&pfns); out_unlock: mutex_unlock(&pages->mutex); return rc; } /* * A medium speed path that still allows DMA inconsistencies, but doesn't do any * memory allocations or interval tree searches. */ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, unsigned long offset, void *data, unsigned long length, unsigned int flags) { struct page *page = NULL; int rc; if (!mmget_not_zero(pages->source_mm)) return iopt_pages_rw_slow(pages, index, index, offset, data, length, flags); if (iommufd_should_fail()) { rc = -EINVAL; goto out_mmput; } mmap_read_lock(pages->source_mm); rc = pin_user_pages_remote( pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? 
FOLL_WRITE : 0, &page, NULL); mmap_read_unlock(pages->source_mm); if (rc != 1) { if (WARN_ON(rc >= 0)) rc = -EINVAL; goto out_mmput; } copy_data_page(page, data, offset, length, flags); unpin_user_page(page); rc = 0; out_mmput: mmput(pages->source_mm); return rc; } /** * iopt_pages_rw_access - Copy to/from a linear slice of the pages * @pages: pages to act on * @start_byte: First byte of pages to copy to/from * @data: Kernel buffer to get/put the data * @length: Number of bytes to copy * @flags: IOMMUFD_ACCESS_RW_* flags * * This will find each page in the range, kmap it and then memcpy to/from * the given kernel buffer. */ int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, void *data, unsigned long length, unsigned int flags) { unsigned long start_index = start_byte / PAGE_SIZE; unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE; bool change_mm = current->mm != pages->source_mm; int rc = 0; if (IS_ENABLED(CONFIG_IOMMUFD_TEST) && (flags & __IOMMUFD_ACCESS_RW_SLOW_PATH)) change_mm = true; if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) return -EPERM; if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) { if (start_index == last_index) return iopt_pages_rw_page(pages, start_index, start_byte % PAGE_SIZE, data, length, flags); return iopt_pages_rw_slow(pages, start_index, last_index, start_byte % PAGE_SIZE, data, length, flags); } /* * Try to copy using copy_to_user(). We do this as a fast path and * ignore any pinning inconsistencies, unlike a real DMA path. */ if (change_mm) { if (!mmget_not_zero(pages->source_mm)) return iopt_pages_rw_slow(pages, start_index, last_index, start_byte % PAGE_SIZE, data, length, flags); kthread_use_mm(pages->source_mm); } if (flags & IOMMUFD_ACCESS_RW_WRITE) { if (copy_to_user(pages->uptr + start_byte, data, length)) rc = -EFAULT; } else { if (copy_from_user(data, pages->uptr + start_byte, length)) rc = -EFAULT; } if (change_mm) { kthread_unuse_mm(pages->source_mm); mmput(pages->source_mm); } return rc; } static struct iopt_pages_access * iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, unsigned long last) { struct interval_tree_node *node; lockdep_assert_held(&pages->mutex); /* There can be overlapping ranges in this interval tree */ for (node = interval_tree_iter_first(&pages->access_itree, index, last); node; node = interval_tree_iter_next(node, index, last)) if (node->start == index && node->last == last) return container_of(node, struct iopt_pages_access, node); return NULL; } /** * iopt_area_add_access() - Record an in-knerel access for PFNs * @area: The source of PFNs * @start_index: First page index * @last_index: Inclusive last page index * @out_pages: Output list of struct page's representing the PFNs * @flags: IOMMUFD_ACCESS_RW_* flags * * Record that an in-kernel access will be accessing the pages, ensure they are * pinned, and return the PFNs as a simple list of 'struct page *'. 
* * This should be undone through a matching call to iopt_area_remove_access() */ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index, struct page **out_pages, unsigned int flags) { struct iopt_pages *pages = area->pages; struct iopt_pages_access *access; int rc; if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable) return -EPERM; mutex_lock(&pages->mutex); access = iopt_pages_get_exact_access(pages, start_index, last_index); if (access) { area->num_accesses++; access->users++; iopt_pages_fill_from_xarray(pages, start_index, last_index, out_pages); mutex_unlock(&pages->mutex); return 0; } access = kzalloc(sizeof(*access), GFP_KERNEL_ACCOUNT); if (!access) { rc = -ENOMEM; goto err_unlock; } rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages); if (rc) goto err_free; access->node.start = start_index; access->node.last = last_index; access->users = 1; area->num_accesses++; interval_tree_insert(&access->node, &pages->access_itree); mutex_unlock(&pages->mutex); return 0; err_free: kfree(access); err_unlock: mutex_unlock(&pages->mutex); return rc; } /** * iopt_area_remove_access() - Release an in-kernel access for PFNs * @area: The source of PFNs * @start_index: First page index * @last_index: Inclusive last page index * * Undo iopt_area_add_access() and unpin the pages if necessary. The caller * must stop using the PFNs before calling this. */ void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index, unsigned long last_index) { struct iopt_pages *pages = area->pages; struct iopt_pages_access *access; mutex_lock(&pages->mutex); access = iopt_pages_get_exact_access(pages, start_index, last_index); if (WARN_ON(!access)) goto out_unlock; WARN_ON(area->num_accesses == 0 || access->users == 0); area->num_accesses--; access->users--; if (access->users) goto out_unlock; interval_tree_remove(&access->node, &pages->access_itree); iopt_pages_unfill_xarray(pages, start_index, last_index); kfree(access); out_unlock: mutex_unlock(&pages->mutex); } |
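/*
 * Illustrative sketch only (not part of pages.c): how an in-kernel access
 * consumer might pair iopt_area_add_access() with iopt_area_remove_access().
 * It assumes the caller has already resolved an iopt_area covering
 * [start_index, last_index] and holds whatever references and locks the
 * surrounding io_pagetable code requires; the helper name is hypothetical.
 */
static int example_access_pages(struct iopt_area *area,
				unsigned long start_index,
				unsigned long last_index, bool write)
{
	size_t npages = last_index - start_index + 1;
	struct page **out_pages;
	int rc;

	out_pages = kcalloc(npages, sizeof(*out_pages), GFP_KERNEL);
	if (!out_pages)
		return -ENOMEM;

	/* Pins the PFNs (or reuses existing pins) and fills out_pages */
	rc = iopt_area_add_access(area, start_index, last_index, out_pages,
				  write ? IOMMUFD_ACCESS_RW_WRITE : 0);
	if (rc)
		goto out_free;

	/* ... CPU access through the struct page list happens here ... */

	/* Drop the access; unpins anything no longer referenced */
	iopt_area_remove_access(area, start_index, last_index);
out_free:
	kfree(out_pages);
	return rc;
}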
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/cgroup-defs.h - basic definitions for cgroup * * This file provides basic type and
interface. Include this file directly * only if necessary to avoid cyclic dependencies. */ #ifndef _LINUX_CGROUP_DEFS_H #define _LINUX_CGROUP_DEFS_H #include <linux/limits.h> #include <linux/list.h> #include <linux/idr.h> #include <linux/wait.h> #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/refcount.h> #include <linux/percpu-refcount.h> #include <linux/percpu-rwsem.h> #include <linux/u64_stats_sync.h> #include <linux/workqueue.h> #include <linux/bpf-cgroup-defs.h> #include <linux/psi_types.h> #ifdef CONFIG_CGROUPS struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct kernfs_node; struct kernfs_ops; struct kernfs_open_file; struct seq_file; struct poll_table_struct; #define MAX_CGROUP_TYPE_NAMELEN 32 #define MAX_CGROUP_ROOT_NAMELEN 64 #define MAX_CFTYPE_NAME 64 /* define the enumeration of all cgroup subsystems */ #define SUBSYS(_x) _x ## _cgrp_id, enum cgroup_subsys_id { #include <linux/cgroup_subsys.h> CGROUP_SUBSYS_COUNT, }; #undef SUBSYS /* bits in struct cgroup_subsys_state flags field */ enum { CSS_NO_REF = (1 << 0), /* no reference counting for this css */ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ CSS_VISIBLE = (1 << 3), /* css is visible to userland */ CSS_DYING = (1 << 4), /* css is dying */ }; /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ CGRP_NOTIFY_ON_RELEASE, /* * Clone the parent's configuration when creating a new child * cpuset cgroup. For historical reasons, this option can be * specified at mount time and thus is implemented here. */ CGRP_CPUSET_CLONE_CHILDREN, /* Control group has to be frozen. */ CGRP_FREEZE, /* Cgroup is frozen. */ CGRP_FROZEN, /* Control group has to be killed. */ CGRP_KILL, }; /* cgroup_root->flags */ enum { CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ /* * Consider namespaces as delegation boundaries. If this flag is * set, controller specific interface files in a namespace root * aren't writeable from inside the namespace. */ CGRP_ROOT_NS_DELEGATE = (1 << 3), /* * Reduce latencies on dynamic cgroup modifications such as task * migrations and controller on/offs by disabling percpu operation on * cgroup_threadgroup_rwsem. This makes hot path operations such as * forks and exits into the slow path and more expensive. * * The static usage pattern of creating a cgroup, enabling controllers, * and then seeding it with CLONE_INTO_CGROUP doesn't require write * locking cgroup_threadgroup_rwsem and thus doesn't benefit from * favordynmod. */ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4), /* * Enable cpuset controller in v1 cgroup to use v2 behavior. */ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16), /* * Enable legacy local memory.events. */ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17), /* * Enable recursive subtree protection */ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18), /* * Enable hugetlb accounting for the memory controller. 
*/ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19), }; /* cftype->flags */ enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ __CFTYPE_ADDED = (1 << 18), }; /* * cgroup_file is the handle for a file instance created in a cgroup which * is used, for example, to generate file changed notifications. This can * be obtained by setting cftype->file_offset. */ struct cgroup_file { /* do not access any fields from outside cgroup core */ struct kernfs_node *kn; unsigned long notified_at; struct timer_list notify_timer; }; /* * Per-subsystem/per-cgroup state maintained by the system. This is the * fundamental structural building block that controllers deal with. * * Fields marked with "PI:" are public and immutable and may be accessed * directly without synchronization. */ struct cgroup_subsys_state { /* PI: the cgroup that this css is attached to */ struct cgroup *cgroup; /* PI: the cgroup subsystem that this css is attached to */ struct cgroup_subsys *ss; /* reference count - access via css_[try]get() and css_put() */ struct percpu_ref refcnt; /* siblings list anchored at the parent's ->children */ struct list_head sibling; struct list_head children; /* flush target list anchored at cgrp->rstat_css_list */ struct list_head rstat_css_node; /* * PI: Subsys-unique ID. 0 is unused and root is always 1. The * matching css can be looked up using css_from_id(). */ int id; unsigned int flags; /* * Monotonically increasing unique serial number which defines a * uniform order among all csses. It's guaranteed that all * ->children lists are in the ascending order of ->serial_nr and * used to allow interrupting and resuming iterations. */ u64 serial_nr; /* * Incremented by online self and children. Used to guarantee that * parents are not offlined before their children. */ atomic_t online_cnt; /* percpu_ref killing and RCU release */ struct work_struct destroy_work; struct rcu_work destroy_rwork; /* * PI: the parent css. Placed here for cache proximity to following * fields of the containing structure. */ struct cgroup_subsys_state *parent; }; /* * A css_set is a structure holding pointers to a set of * cgroup_subsys_state objects. This saves space in the task struct * object and speeds up fork()/exit(), since a single inc/dec and a * list_add()/del() can bump the reference count on the entire cgroup * set for a task. */ struct css_set { /* * Set of subsystem states, one for each subsystem. This array is * immutable after creation apart from the init_css_set during * subsystem registration (at boot time). */ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; /* reference count */ refcount_t refcount; /* * For a domain cgroup, the following points to self. If threaded, * to the matching cset of the nearest domain ancestor. The * dom_cset provides access to the domain cgroup and its csses to * which domain level resource consumptions should be charged. 
*/ struct css_set *dom_cset; /* the default cgroup associated with this css_set */ struct cgroup *dfl_cgrp; /* internal task count, protected by css_set_lock */ int nr_tasks; /* * Lists running through all tasks using this cgroup group. * mg_tasks lists tasks which belong to this cset but are in the * process of being migrated out or in. Protected by * css_set_lock, but, during migration, once tasks are moved to * mg_tasks, it can be read safely while holding cgroup_mutex. */ struct list_head tasks; struct list_head mg_tasks; struct list_head dying_tasks; /* all css_task_iters currently walking this cset */ struct list_head task_iters; /* * On the default hierarchy, ->subsys[ssid] may point to a css * attached to an ancestor instead of the cgroup this css_set is * associated with. The following node is anchored at * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to * iterate through all css's attached to a given cgroup. */ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT]; /* all threaded csets whose ->dom_cset points to this cset */ struct list_head threaded_csets; struct list_head threaded_csets_node; /* * List running through all cgroup groups in the same hash * slot. Protected by css_set_lock */ struct hlist_node hlist; /* * List of cgrp_cset_links pointing at cgroups referenced from this * css_set. Protected by css_set_lock. */ struct list_head cgrp_links; /* * List of csets participating in the on-going migration either as * source or destination. Protected by cgroup_mutex. */ struct list_head mg_src_preload_node; struct list_head mg_dst_preload_node; struct list_head mg_node; /* * If this cset is acting as the source of migration the following * two fields are set. mg_src_cgrp and mg_dst_cgrp are * respectively the source and destination cgroups of the on-going * migration. mg_dst_cset is the destination cset the target tasks * on this cset should be migrated to. Protected by cgroup_mutex. */ struct cgroup *mg_src_cgrp; struct cgroup *mg_dst_cgrp; struct css_set *mg_dst_cset; /* dead and being drained, ignore for migration */ bool dead; /* For RCU-protected deletion */ struct rcu_head rcu_head; }; struct cgroup_base_stat { struct task_cputime cputime; #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif }; /* * rstat - cgroup scalable recursive statistics. Accounting is done * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the * hierarchy on reads. * * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are * linked into the updated tree. On the following read, propagation only * considers and consumes the updated tree. This makes reading O(the * number of descendants which have been active since last read) instead of * O(the total number of descendants). * * This is important because there can be a lot of (draining) cgroups which * aren't active and stat may be read frequently. The combination can * become very expensive. By propagating selectively, increasing reading * frequency decreases the cost of each read. * * This struct hosts both the fields which implement the above - * updated_children and updated_next - and the fields which track basic * resource statistics on top of it - bsync, bstat and last_bstat. */ struct cgroup_rstat_cpu { /* * ->bsync protects ->bstat. These are the only fields which get * updated in the hot path. */ struct u64_stats_sync bsync; struct cgroup_base_stat bstat; /* * Snapshots at the last reading. These are used to calculate the * deltas to propagate to the global counters. 
*/ struct cgroup_base_stat last_bstat; /* * This field is used to record the cumulative per-cpu time of * the cgroup and its descendants. Currently it can be read via * eBPF/drgn etc, and we are still trying to determine how to * expose it in the cgroupfs interface. */ struct cgroup_base_stat subtree_bstat; /* * Snapshots at the last reading. These are used to calculate the * deltas to propagate to the per-cpu subtree_bstat. */ struct cgroup_base_stat last_subtree_bstat; /* * Child cgroups with stat updates on this cpu since the last read * are linked on the parent's ->updated_children through * ->updated_next. * * In addition to being more compact, singly-linked list pointing * to the cgroup makes it unnecessary for each per-cpu struct to * point back to the associated cgroup. * * Protected by per-cpu cgroup_rstat_cpu_lock. */ struct cgroup *updated_children; /* terminated by self cgroup */ struct cgroup *updated_next; /* NULL iff not on the list */ }; struct cgroup_freezer_state { /* Should the cgroup and its descendants be frozen? */ bool freeze; /* Should the cgroup actually be frozen? */ int e_freeze; /* Fields below are protected by css_set_lock */ /* Number of frozen descendant cgroups */ int nr_frozen_descendants; /* * Number of tasks, which are counted as frozen: * frozen, SIGSTOPped, and PTRACEd. */ int nr_frozen_tasks; }; struct cgroup { /* self css with NULL ->ss, points back to this cgroup */ struct cgroup_subsys_state self; unsigned long flags; /* "unsigned long" so bitops work */ /* * The depth this cgroup is at. The root is at depth zero and each * step down the hierarchy increments the level. This along with * ancestors[] can determine whether a given cgroup is a * descendant of another without traversing the hierarchy. */ int level; /* Maximum allowed descendant tree depth */ int max_depth; /* * Keep track of total numbers of visible and dying descendant cgroups. * Dying cgroups are cgroups which were deleted by a user, * but still exist because someone else is holding a reference. * max_descendants is the maximum allowed number of descendant cgroups. * * nr_descendants and nr_dying_descendants are protected * by cgroup_mutex and css_set_lock. It's fine to read them holding * either cgroup_mutex or css_set_lock; for writing, both locks * should be held. */ int nr_descendants; int nr_dying_descendants; int max_descendants; /* * Each non-empty css_set associated with this cgroup contributes * one to nr_populated_csets. The counter is zero iff this cgroup * doesn't have any tasks. * * All children which have non-zero nr_populated_csets and/or * nr_populated_children of their own contribute one to either * nr_populated_domain_children or nr_populated_threaded_children * depending on their type. Each counter is zero iff all cgroups * of the type in the subtree proper don't have any tasks. */ int nr_populated_csets; int nr_populated_domain_children; int nr_populated_threaded_children; int nr_threaded_children; /* # of live threaded child cgroups */ struct kernfs_node *kn; /* cgroup kernfs entry */ struct cgroup_file procs_file; /* handle for "cgroup.procs" */ struct cgroup_file events_file; /* handle for "cgroup.events" */ /* handles for "{cpu,memory,io,irq}.pressure" */ struct cgroup_file psi_files[NR_PSI_RESOURCES]; /* * The bitmask of subsystems enabled on the child cgroups. * ->subtree_control is the one configured through * "cgroup.subtree_control" while ->subtree_ss_mask is the effective * one which may have more subsystems enabled.
Controller knobs * are made available iff the controller is enabled in ->subtree_control. */ u16 subtree_control; u16 subtree_ss_mask; u16 old_subtree_control; u16 old_subtree_ss_mask; /* Private pointers for each registered subsystem */ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; struct cgroup_root *root; /* * List of cgrp_cset_links pointing at css_sets with tasks in this * cgroup. Protected by css_set_lock. */ struct list_head cset_links; /* * On the default hierarchy, a css_set for a cgroup with some * subsys disabled will point to css's which are associated with * the closest ancestor which has the subsys enabled. The * following lists all css_sets which point to this cgroup's css * for the given subsystem. */ struct list_head e_csets[CGROUP_SUBSYS_COUNT]; /* * If !threaded, self. If threaded, it points to the nearest * domain ancestor. Inside a threaded subtree, cgroups are exempt * from process granularity and no-internal-task constraint. * Domain level resource consumptions which aren't tied to a * specific task are charged to the dom_cgrp. */ struct cgroup *dom_cgrp; struct cgroup *old_dom_cgrp; /* used while enabling threaded */ /* per-cpu recursive resource statistics */ struct cgroup_rstat_cpu __percpu *rstat_cpu; struct list_head rstat_css_list; /* * Add padding to separate the read mostly rstat_cpu and * rstat_css_list into a different cacheline from the following * rstat_flush_next and *bstat fields which can have frequent updates. */ CACHELINE_PADDING(_pad_); /* * A singly-linked list of cgroup structures to be rstat flushed. * This is a scratch field to be used exclusively by * cgroup_rstat_flush_locked() and protected by cgroup_rstat_lock. */ struct cgroup *rstat_flush_next; /* cgroup basic resource statistics */ struct cgroup_base_stat last_bstat; struct cgroup_base_stat bstat; struct prev_cputime prev_cputime; /* for printing out cputime */ /* * list of pidlists, up to two for each namespace (one for procs, one * for tasks); created on demand. */ struct list_head pidlists; struct mutex pidlist_mutex; /* used to wait for offlining of csses */ wait_queue_head_t offline_waitq; /* used to schedule release agent */ struct work_struct release_agent_work; /* used to track pressure stalls */ struct psi_group *psi; /* used to store eBPF programs */ struct cgroup_bpf bpf; /* If there is block congestion on this cgroup. */ atomic_t congestion_count; /* Used to store internal freezer state */ struct cgroup_freezer_state freezer; #ifdef CONFIG_BPF_SYSCALL struct bpf_local_storage __rcu *bpf_cgrp_storage; #endif /* All ancestors including self */ struct cgroup *ancestors[]; }; /* * A cgroup_root represents the root of a cgroup hierarchy, and may be * associated with a kernfs_root to form an active hierarchy. This is * internal to cgroup core. Don't access directly from controllers. */ struct cgroup_root { struct kernfs_root *kf_root; /* The bitmask of subsystems attached to this hierarchy */ unsigned int subsys_mask; /* Unique id for this hierarchy. */ int hierarchy_id; /* A list running through the active hierarchies */ struct list_head root_list; struct rcu_head rcu; /* Must be near the top */ /* * The root cgroup. The containing cgroup_root will be destroyed on its * release. cgrp->ancestors[0] overflows into the * following field, so cgrp_ancestor_storage must immediately follow.
*/ struct cgroup cgrp; /* must follow cgrp for cgrp->ancestors[0], see above */ struct cgroup *cgrp_ancestor_storage; /* Number of cgroups in the hierarchy, used only for /proc/cgroups */ atomic_t nr_cgrps; /* Hierarchy-specific flags */ unsigned int flags; /* The path to use for release notifications. */ char release_agent_path[PATH_MAX]; /* The name for this hierarchy - may be empty */ char name[MAX_CGROUP_ROOT_NAMELEN]; }; /* * struct cftype: handler definitions for cgroup control files * * When reading/writing to a file: * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata * - the 'cftype' of the file is file->f_path.dentry->d_fsdata */ struct cftype { /* * By convention, the name should begin with the name of the * subsystem, followed by a period. Zero length string indicates * end of cftype array. */ char name[MAX_CFTYPE_NAME]; unsigned long private; /* * The maximum length of string, excluding trailing nul, that can * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed. */ size_t max_write_len; /* CFTYPE_* flags */ unsigned int flags; /* * If non-zero, should contain the offset from the start of css to * a struct cgroup_file field. cgroup will record the handle of * the created file into it. The recorded handle can be used as * long as the containing css remains accessible. */ unsigned int file_offset; /* * Fields used for internal bookkeeping. Initialized automatically * during registration. */ struct cgroup_subsys *ss; /* NULL for cgroup core files */ struct list_head node; /* anchored at ss->cfts */ struct kernfs_ops *kf_ops; int (*open)(struct kernfs_open_file *of); void (*release)(struct kernfs_open_file *of); /* * read_u64() is a shortcut for the common case of returning a * single integer. Use it in place of read() */ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft); /* * read_s64() is a signed version of read_u64() */ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft); /* generic seq_file read interface */ int (*seq_show)(struct seq_file *sf, void *v); /* optional ops, implement all or none */ void *(*seq_start)(struct seq_file *sf, loff_t *ppos); void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos); void (*seq_stop)(struct seq_file *sf, void *v); /* * write_u64() is a shortcut for the common case of accepting * a single integer (as parsed by simple_strtoull) from * userspace. Use in place of write(); return 0 or error. */ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft, u64 val); /* * write_s64() is a signed version of write_u64() */ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft, s64 val); /* * write() is the generic write callback which maps directly to * kernfs write operation and overrides all other operations. * Maximum write size is determined by ->max_write_len. Use * of_css/cft() to access the associated css and cft. */ ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); __poll_t (*poll)(struct kernfs_open_file *of, struct poll_table_struct *pt); #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif }; /* * Control Group subsystem type. 
* See Documentation/admin-guide/cgroup-v1/cgroups.rst for details */ struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); void (*css_released)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); void (*css_reset)(struct cgroup_subsys_state *css); void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu); int (*css_extra_stat_show)(struct seq_file *seq, struct cgroup_subsys_state *css); int (*css_local_stat_show)(struct seq_file *seq, struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); void (*post_attach)(void); int (*can_fork)(struct task_struct *task, struct css_set *cset); void (*cancel_fork)(struct task_struct *task, struct css_set *cset); void (*fork)(struct task_struct *task); void (*exit)(struct task_struct *task); void (*release)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); bool early_init:1; /* * If %true, the controller, on the default hierarchy, doesn't show * up in "cgroup.controllers" or "cgroup.subtree_control", is * implicitly enabled on all cgroups on the default hierarchy, and * bypasses the "no internal process" constraint. This is for * utility type controllers which are transparent to userland. * * An implicit controller can be stolen from the default hierarchy * anytime and thus must be okay with offline csses from previous * hierarchies coexisting with csses for the current one. */ bool implicit_on_dfl:1; /* * If %true, the controller supports threaded mode on the default * hierarchy. In a threaded subtree, both process granularity and * the no-internal-process constraint are ignored and threaded * controllers should be able to handle that. * * Note that as an implicit controller is automatically enabled on * all cgroups on the default hierarchy, it should also be * threaded. implicit && !threaded is not supported. */ bool threaded:1; /* the following two fields are initialized automatically during boot */ int id; const char *name; /* optional, initialized automatically during boot if not set */ const char *legacy_name; /* link to parent, protected by cgroup_lock() */ struct cgroup_root *root; /* idr for css->id */ struct idr css_idr; /* * List of cftypes. Each entry is the first entry of an array * terminated by zero length name. */ struct list_head cfts; /* * Base cftypes which are automatically registered. The two can * point to the same array. */ struct cftype *dfl_cftypes; /* for the default hierarchy */ struct cftype *legacy_cftypes; /* for the legacy hierarchies */ /* * A subsystem may depend on other subsystems. When such a subsystem * is enabled on a cgroup, the depended-upon subsystems are enabled * together if available. Subsystems enabled due to dependency are * not visible to userland until explicitly enabled. The following * specifies the mask of subsystems that this one depends on. */ unsigned int depends_on; }; extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; /** * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task * * Allows cgroup operations to synchronize against threadgroup changes * using a percpu_rw_semaphore.
*/ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { percpu_down_read(&cgroup_threadgroup_rwsem); } /** * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups * @tsk: target task * * Counterpart of cgroup_threadgroup_change_begin(). */ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) { percpu_up_read(&cgroup_threadgroup_rwsem); } #else /* CONFIG_CGROUPS */ #define CGROUP_SUBSYS_COUNT 0 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) { might_sleep(); } static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {} #endif /* CONFIG_CGROUPS */ #ifdef CONFIG_SOCK_CGROUP_DATA /* * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains * per-socket cgroup information except for memcg association. * * On legacy hierarchies, net_prio and net_cls controllers directly * set attributes on each sock which can then be tested by the network * layer. On the default hierarchy, each sock is associated with the * cgroup it was created in and the networking layer can match the * cgroup directly. */ struct sock_cgroup_data { struct cgroup *cgroup; /* v2 */ #ifdef CONFIG_CGROUP_NET_CLASSID u32 classid; /* v1 */ #endif #ifdef CONFIG_CGROUP_NET_PRIO u16 prioidx; /* v1 */ #endif }; static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) { #ifdef CONFIG_CGROUP_NET_PRIO return READ_ONCE(skcd->prioidx); #else return 1; #endif } static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) { #ifdef CONFIG_CGROUP_NET_CLASSID return READ_ONCE(skcd->classid); #else return 0; #endif } static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, u16 prioidx) { #ifdef CONFIG_CGROUP_NET_PRIO WRITE_ONCE(skcd->prioidx, prioidx); #endif } static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, u32 classid) { #ifdef CONFIG_CGROUP_NET_CLASSID WRITE_ONCE(skcd->classid, classid); #endif } #else /* CONFIG_SOCK_CGROUP_DATA */ struct sock_cgroup_data { }; #endif /* CONFIG_SOCK_CGROUP_DATA */ #endif /* _LINUX_CGROUP_DEFS_H */
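The sock_cgroup_data accessors above appear here only as definitions, so a brief usage sketch may help. The snippet below is an editor's illustration rather than code from this header: the helper names example_sk_classid() and example_sk_prioidx() are hypothetical, and it assumes a kernel built with CONFIG_SOCK_CGROUP_DATA plus the usual sk_cgrp_data member of struct sock.

#include <net/sock.h>	/* struct sock and its sk_cgrp_data member */

/*
 * Hypothetical helpers (not part of cgroup-defs.h) showing how callers
 * read the per-socket v1 attributes.  The accessors degrade gracefully:
 * sock_cgroup_classid() returns 0 and sock_cgroup_prioidx() returns 1
 * when the corresponding controller is compiled out.
 */
static inline u32 example_sk_classid(const struct sock *sk)
{
	return sock_cgroup_classid(&sk->sk_cgrp_data);
}

static inline u16 example_sk_prioidx(const struct sock *sk)
{
	return sock_cgroup_prioidx(&sk->sk_cgrp_data);
}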
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/sysfs.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Theodore Ts'o (tytso@mit.edu) * */ #include <linux/time.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/part_stat.h> #include "ext4.h" #include "ext4_jbd2.h" typedef enum { attr_noop, attr_delayed_allocation_blocks, attr_session_write_kbytes, attr_lifetime_write_kbytes, attr_reserved_clusters, attr_sra_exceeded_retry_limit, attr_inode_readahead, attr_trigger_test_error, attr_first_error_time, attr_last_error_time, attr_feature, attr_pointer_ui, attr_pointer_ul, attr_pointer_u64, attr_pointer_u8, attr_pointer_string, attr_pointer_atomic, attr_journal_task, } attr_id_t; typedef enum { ptr_explicit, ptr_ext4_sb_info_offset, ptr_ext4_super_block_offset, } attr_ptr_t; static const char proc_dirname[] = "fs/ext4"; static struct proc_dir_entry *ext4_proc_root; struct ext4_attr { struct attribute attr; short attr_id; short attr_ptr; unsigned short attr_size; union { int offset; void *explicit_ptr; } u; }; static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf) {
struct super_block *sb = sbi->s_buddy_cache->i_sb; return sysfs_emit(buf, "%lu\n", (part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - sbi->s_sectors_written_start) >> 1); } static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf) { struct super_block *sb = sbi->s_buddy_cache->i_sb; return sysfs_emit(buf, "%llu\n", (unsigned long long)(sbi->s_kbytes_written + ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) - EXT4_SB(sb)->s_sectors_written_start) >> 1))); } static ssize_t inode_readahead_blks_store(struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned long t; int ret; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (t && (!is_power_of_2(t) || t > 0x40000000)) return -EINVAL; sbi->s_inode_readahead_blks = t; return count; } static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi, const char *buf, size_t count) { unsigned long long val; ext4_fsblk_t clusters = (ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits); int ret; ret = kstrtoull(skip_spaces(buf), 0, &val); if (ret || val >= clusters) return -EINVAL; atomic64_set(&sbi->s_resv_clusters, val); return count; } static ssize_t trigger_test_error(struct ext4_sb_info *sbi, const char *buf, size_t count) { int len = count; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (len && buf[len-1] == '\n') len--; if (len) ext4_error(sbi->s_sb, "%.*s", len, buf); return count; } static ssize_t journal_task_show(struct ext4_sb_info *sbi, char *buf) { if (!sbi->s_journal) return sysfs_emit(buf, "<none>\n"); return sysfs_emit(buf, "%d\n", task_pid_vnr(sbi->s_journal->j_task)); } #define EXT4_ATTR(_name,_mode,_id) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_##_id, \ } #define EXT4_ATTR_FUNC(_name,_mode) EXT4_ATTR(_name,_mode,_name) #define EXT4_ATTR_FEATURE(_name) EXT4_ATTR(_name, 0444, feature) #define EXT4_ATTR_OFFSET(_name,_mode,_id,_struct,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_##_id, \ .attr_ptr = ptr_##_struct##_offset, \ .u = { \ .offset = offsetof(struct _struct, _elname),\ }, \ } #define EXT4_ATTR_STRING(_name,_mode,_size,_struct,_elname) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_pointer_string, \ .attr_size = _size, \ .attr_ptr = ptr_##_struct##_offset, \ .u = { \ .offset = offsetof(struct _struct, _elname),\ }, \ } #define EXT4_RO_ATTR_ES_UI(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0444, pointer_ui, ext4_super_block, _elname) #define EXT4_RO_ATTR_ES_U8(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0444, pointer_u8, ext4_super_block, _elname) #define EXT4_RO_ATTR_ES_U64(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0444, pointer_u64, ext4_super_block, _elname) #define EXT4_RO_ATTR_ES_STRING(_name,_elname,_size) \ EXT4_ATTR_STRING(_name, 0444, _size, ext4_super_block, _elname) #define EXT4_RW_ATTR_SBI_UI(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname) #define EXT4_RW_ATTR_SBI_UL(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname) #define EXT4_RO_ATTR_SBI_ATOMIC(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0444, pointer_atomic, ext4_sb_info, _elname) #define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .attr_id = attr_##_id, \ .attr_ptr = ptr_explicit, \ .u = { \ .explicit_ptr = _ptr, \ }, \ } 
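To make the macro layering above concrete, here is an editor's sketch (not part of sysfs.c) of what one wrapper used just below, EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal), expands to; the expansion is guarded with #if 0 so it cannot clash with the real macro-generated definition.

#if 0	/* documentation-only expansion of EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal) */
static struct ext4_attr ext4_attr_inode_goal = {
	.attr		= { .name = "inode_goal", .mode = 0644 },
	.attr_id	= attr_pointer_ui,		/* handled by the generic ui show/store code */
	.attr_ptr	= ptr_ext4_sb_info_offset,	/* value lives inside struct ext4_sb_info */
	.u = {
		.offset = offsetof(struct ext4_sb_info, s_inode_goal),
	},
};
#endif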
#define ATTR_LIST(name) &ext4_attr_##name.attr EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444); EXT4_ATTR_FUNC(session_write_kbytes, 0444); EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444); EXT4_ATTR_FUNC(reserved_clusters, 0644); EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444); EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead, ext4_sb_info, s_inode_readahead_blks); EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats); EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan); EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error); EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval); EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst); EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval); EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst); EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval); EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst); EXT4_RW_ATTR_SBI_UI(mb_best_avail_max_trim_order, s_mb_best_avail_max_trim_order); #ifdef CONFIG_EXT4_DEBUG EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail); #endif EXT4_RO_ATTR_SBI_ATOMIC(warning_count, s_warning_count); EXT4_RO_ATTR_SBI_ATOMIC(msg_count, s_msg_count); EXT4_RO_ATTR_ES_UI(errors_count, s_error_count); EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode); EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode); EXT4_RO_ATTR_ES_UI(first_error_ino, s_first_error_ino); EXT4_RO_ATTR_ES_UI(last_error_ino, s_last_error_ino); EXT4_RO_ATTR_ES_U64(first_error_block, s_first_error_block); EXT4_RO_ATTR_ES_U64(last_error_block, s_last_error_block); EXT4_RO_ATTR_ES_UI(first_error_line, s_first_error_line); EXT4_RO_ATTR_ES_UI(last_error_line, s_last_error_line); EXT4_RO_ATTR_ES_STRING(first_error_func, s_first_error_func, 32); EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32); EXT4_ATTR(first_error_time, 0444, first_error_time); EXT4_ATTR(last_error_time, 0444, last_error_time); EXT4_ATTR(journal_task, 0444, journal_task); EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch); EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit); EXT4_RW_ATTR_SBI_UL(last_trim_minblks, s_last_trim_minblks); static unsigned int old_bump_val = 128; EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val); static struct attribute *ext4_attrs[] = { ATTR_LIST(delayed_allocation_blocks), ATTR_LIST(session_write_kbytes), ATTR_LIST(lifetime_write_kbytes), ATTR_LIST(reserved_clusters), ATTR_LIST(sra_exceeded_retry_limit), ATTR_LIST(inode_readahead_blks), ATTR_LIST(inode_goal), ATTR_LIST(mb_stats), ATTR_LIST(mb_max_to_scan), ATTR_LIST(mb_min_to_scan), ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), ATTR_LIST(mb_max_linear_groups), ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), ATTR_LIST(err_ratelimit_interval_ms), ATTR_LIST(err_ratelimit_burst), ATTR_LIST(warning_ratelimit_interval_ms), ATTR_LIST(warning_ratelimit_burst), ATTR_LIST(msg_ratelimit_interval_ms), ATTR_LIST(msg_ratelimit_burst), 
ATTR_LIST(mb_best_avail_max_trim_order), ATTR_LIST(errors_count), ATTR_LIST(warning_count), ATTR_LIST(msg_count), ATTR_LIST(first_error_ino), ATTR_LIST(last_error_ino), ATTR_LIST(first_error_block), ATTR_LIST(last_error_block), ATTR_LIST(first_error_line), ATTR_LIST(last_error_line), ATTR_LIST(first_error_func), ATTR_LIST(last_error_func), ATTR_LIST(first_error_errcode), ATTR_LIST(last_error_errcode), ATTR_LIST(first_error_time), ATTR_LIST(last_error_time), ATTR_LIST(journal_task), #ifdef CONFIG_EXT4_DEBUG ATTR_LIST(simulate_fail), #endif ATTR_LIST(mb_prefetch), ATTR_LIST(mb_prefetch_limit), ATTR_LIST(last_trim_minblks), NULL, }; ATTRIBUTE_GROUPS(ext4); /* Features this copy of ext4 supports */ EXT4_ATTR_FEATURE(lazy_itable_init); EXT4_ATTR_FEATURE(batched_discard); EXT4_ATTR_FEATURE(meta_bg_resize); #ifdef CONFIG_FS_ENCRYPTION EXT4_ATTR_FEATURE(encryption); EXT4_ATTR_FEATURE(test_dummy_encryption_v2); #endif #if IS_ENABLED(CONFIG_UNICODE) EXT4_ATTR_FEATURE(casefold); #endif #ifdef CONFIG_FS_VERITY EXT4_ATTR_FEATURE(verity); #endif EXT4_ATTR_FEATURE(metadata_csum_seed); EXT4_ATTR_FEATURE(fast_commit); #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) EXT4_ATTR_FEATURE(encrypted_casefold); #endif static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(lazy_itable_init), ATTR_LIST(batched_discard), ATTR_LIST(meta_bg_resize), #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), ATTR_LIST(test_dummy_encryption_v2), #endif #if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(casefold), #endif #ifdef CONFIG_FS_VERITY ATTR_LIST(verity), #endif ATTR_LIST(metadata_csum_seed), ATTR_LIST(fast_commit), #if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) ATTR_LIST(encrypted_casefold), #endif NULL, }; ATTRIBUTE_GROUPS(ext4_feat); static void *calc_ptr(struct ext4_attr *a, struct ext4_sb_info *sbi) { switch (a->attr_ptr) { case ptr_explicit: return a->u.explicit_ptr; case ptr_ext4_sb_info_offset: return (void *) (((char *) sbi) + a->u.offset); case ptr_ext4_super_block_offset: return (void *) (((char *) sbi->s_es) + a->u.offset); } return NULL; } static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi) { return sysfs_emit(buf, "%lld\n", ((time64_t)hi << 32) + le32_to_cpu(lo)); } #define print_tstamp(buf, es, tstamp) \ __print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi) static ssize_t ext4_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); void *ptr = calc_ptr(a, sbi); switch (a->attr_id) { case attr_delayed_allocation_blocks: return sysfs_emit(buf, "%llu\n", (s64) EXT4_C2B(sbi, percpu_counter_sum(&sbi->s_dirtyclusters_counter))); case attr_session_write_kbytes: return session_write_kbytes_show(sbi, buf); case attr_lifetime_write_kbytes: return lifetime_write_kbytes_show(sbi, buf); case attr_reserved_clusters: return sysfs_emit(buf, "%llu\n", (unsigned long long) atomic64_read(&sbi->s_resv_clusters)); case attr_sra_exceeded_retry_limit: return sysfs_emit(buf, "%llu\n", (unsigned long long) percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit)); case attr_inode_readahead: case attr_pointer_ui: if (!ptr) return 0; if (a->attr_ptr == ptr_ext4_super_block_offset) return sysfs_emit(buf, "%u\n", le32_to_cpup(ptr)); else return sysfs_emit(buf, "%u\n", *((unsigned int *) ptr)); case attr_pointer_ul: if (!ptr) return 0; return sysfs_emit(buf, "%lu\n", *((unsigned long *) ptr)); case attr_pointer_u8: if (!ptr) return 0; return 
sysfs_emit(buf, "%u\n", *((unsigned char *) ptr)); case attr_pointer_u64: if (!ptr) return 0; if (a->attr_ptr == ptr_ext4_super_block_offset) return sysfs_emit(buf, "%llu\n", le64_to_cpup(ptr)); else return sysfs_emit(buf, "%llu\n", *((unsigned long long *) ptr)); case attr_pointer_string: if (!ptr) return 0; return sysfs_emit(buf, "%.*s\n", a->attr_size, (char *) ptr); case attr_pointer_atomic: if (!ptr) return 0; return sysfs_emit(buf, "%d\n", atomic_read((atomic_t *) ptr)); case attr_feature: return sysfs_emit(buf, "supported\n"); case attr_first_error_time: return print_tstamp(buf, sbi->s_es, s_first_error_time); case attr_last_error_time: return print_tstamp(buf, sbi->s_es, s_last_error_time); case attr_journal_task: return journal_task_show(sbi, buf); } return 0; } static ssize_t ext4_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); struct ext4_attr *a = container_of(attr, struct ext4_attr, attr); void *ptr = calc_ptr(a, sbi); unsigned long t; int ret; switch (a->attr_id) { case attr_reserved_clusters: return reserved_clusters_store(sbi, buf, len); case attr_pointer_ui: if (!ptr) return 0; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; if (a->attr_ptr == ptr_ext4_super_block_offset) *((__le32 *) ptr) = cpu_to_le32(t); else *((unsigned int *) ptr) = t; return len; case attr_pointer_ul: if (!ptr) return 0; ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret) return ret; *((unsigned long *) ptr) = t; return len; case attr_inode_readahead: return inode_readahead_blks_store(sbi, buf, len); case attr_trigger_test_error: return trigger_test_error(sbi, buf, len); } return 0; } static void ext4_sb_release(struct kobject *kobj) { struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info, s_kobj); complete(&sbi->s_kobj_unregister); } static void ext4_feat_release(struct kobject *kobj) { kfree(kobj); } static const struct sysfs_ops ext4_attr_ops = { .show = ext4_attr_show, .store = ext4_attr_store, }; static const struct kobj_type ext4_sb_ktype = { .default_groups = ext4_groups, .sysfs_ops = &ext4_attr_ops, .release = ext4_sb_release, }; static const struct kobj_type ext4_feat_ktype = { .default_groups = ext4_feat_groups, .sysfs_ops = &ext4_attr_ops, .release = ext4_feat_release, }; void ext4_notify_error_sysfs(struct ext4_sb_info *sbi) { sysfs_notify(&sbi->s_kobj, NULL, "errors_count"); } static struct kobject *ext4_root; static struct kobject *ext4_feat; int ext4_register_sysfs(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); int err; init_completion(&sbi->s_kobj_unregister); err = kobject_init_and_add(&sbi->s_kobj, &ext4_sb_ktype, ext4_root, "%s", sb->s_id); if (err) { kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); return err; } if (ext4_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root); if (sbi->s_proc) { proc_create_single_data("options", S_IRUGO, sbi->s_proc, ext4_seq_options_show, sb); proc_create_single_data("es_shrinker_info", S_IRUGO, sbi->s_proc, ext4_seq_es_shrinker_info_show, sb); proc_create_single_data("fc_info", 0444, sbi->s_proc, ext4_fc_info_show, sb); proc_create_seq_data("mb_groups", S_IRUGO, sbi->s_proc, &ext4_mb_seq_groups_ops, sb); proc_create_single_data("mb_stats", 0444, sbi->s_proc, ext4_seq_mb_stats_show, sb); proc_create_seq_data("mb_structs_summary", 0444, sbi->s_proc, &ext4_mb_seq_structs_summary_ops, sb); } return 0; } void ext4_unregister_sysfs(struct super_block *sb) { struct 
ext4_sb_info *sbi = EXT4_SB(sb); if (sbi->s_proc) remove_proc_subtree(sb->s_id, ext4_proc_root); kobject_del(&sbi->s_kobj); } int __init ext4_init_sysfs(void) { int ret; ext4_root = kobject_create_and_add("ext4", fs_kobj); if (!ext4_root) return -ENOMEM; ext4_feat = kzalloc(sizeof(*ext4_feat), GFP_KERNEL); if (!ext4_feat) { ret = -ENOMEM; goto root_err; } ret = kobject_init_and_add(ext4_feat, &ext4_feat_ktype, ext4_root, "features"); if (ret) goto feat_err; ext4_proc_root = proc_mkdir(proc_dirname, NULL); return ret; feat_err: kobject_put(ext4_feat); ext4_feat = NULL; root_err: kobject_put(ext4_root); ext4_root = NULL; return ret; } void ext4_exit_sysfs(void) { kobject_put(ext4_feat); ext4_feat = NULL; kobject_put(ext4_root); ext4_root = NULL; remove_proc_entry(proc_dirname, NULL); ext4_proc_root = NULL; }
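As a usage sketch, the per-filesystem attributes registered above appear under /sys/fs/ext4/<disk>/ (named after sb->s_id) and the feature flags under /sys/fs/ext4/features/. The small userspace reader below is an editor's illustration, not part of the kernel sources; the device name sda1 is an assumed placeholder.

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* "sda1" is a placeholder for the s_id of a mounted ext4 filesystem. */
	FILE *f = fopen("/sys/fs/ext4/sda1/lifetime_write_kbytes", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* lifetime_write_kbytes_show() emits one decimal value plus '\n'. */
	if (fgets(buf, sizeof(buf), f))
		printf("lifetime writes (KiB): %s", buf);
	fclose(f);
	return 0;
}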
// SPDX-License-Identifier: GPL-2.0-only /* * net/core/fib_rules.c Generic Routing Rules * * Authors: Thomas Graf <tgraf@suug.ch> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/module.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/fib_rules.h> #include <net/ip_tunnels.h> #include <linux/indirect_call_wrapper.h> #if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES) #ifdef CONFIG_IP_MULTIPLE_TABLES #define INDIRECT_CALL_MT(f, f2, f1, ...) \ INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__) #else #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__) #endif #elif defined(CONFIG_IP_MULTIPLE_TABLES) #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) #else #define INDIRECT_CALL_MT(f, f2, f1, ...)
f(__VA_ARGS__) #endif static const struct fib_kuid_range fib_kuid_range_unset = { KUIDT_INIT(0), KUIDT_INIT(~0), }; bool fib_rule_matchall(const struct fib_rule *rule) { if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id || rule->flags) return false; if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1) return false; if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) || !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end)) return false; if (fib_rule_port_range_set(&rule->sport_range)) return false; if (fib_rule_port_range_set(&rule->dport_range)) return false; return true; } EXPORT_SYMBOL_GPL(fib_rule_matchall); int fib_default_rule_add(struct fib_rules_ops *ops, u32 pref, u32 table) { struct fib_rule *r; r = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT); if (r == NULL) return -ENOMEM; refcount_set(&r->refcnt, 1); r->action = FR_ACT_TO_TBL; r->pref = pref; r->table = table; r->proto = RTPROT_KERNEL; r->fr_net = ops->fro_net; r->uid_range = fib_kuid_range_unset; r->suppress_prefixlen = -1; r->suppress_ifgroup = -1; /* The lock is not required here, the list is unreachable * at the moment this function is called */ list_add_tail(&r->list, &ops->rules_list); return 0; } EXPORT_SYMBOL(fib_default_rule_add); static u32 fib_default_rule_pref(struct fib_rules_ops *ops) { struct list_head *pos; struct fib_rule *rule; if (!list_empty(&ops->rules_list)) { pos = ops->rules_list.next; if (pos->next != &ops->rules_list) { rule = list_entry(pos->next, struct fib_rule, list); if (rule->pref) return rule->pref - 1; } } return 0; } static void notify_rule_change(int event, struct fib_rule *rule, struct fib_rules_ops *ops, struct nlmsghdr *nlh, u32 pid); static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family) { struct fib_rules_ops *ops; rcu_read_lock(); list_for_each_entry_rcu(ops, &net->rules_ops, list) { if (ops->family == family) { if (!try_module_get(ops->owner)) ops = NULL; rcu_read_unlock(); return ops; } } rcu_read_unlock(); return NULL; } static void rules_ops_put(struct fib_rules_ops *ops) { if (ops) module_put(ops->owner); } static void flush_route_cache(struct fib_rules_ops *ops) { if (ops->flush_cache) ops->flush_cache(ops); } static int __fib_rules_register(struct fib_rules_ops *ops) { int err = -EEXIST; struct fib_rules_ops *o; struct net *net; net = ops->fro_net; if (ops->rule_size < sizeof(struct fib_rule)) return -EINVAL; if (ops->match == NULL || ops->configure == NULL || ops->compare == NULL || ops->fill == NULL || ops->action == NULL) return -EINVAL; spin_lock(&net->rules_mod_lock); list_for_each_entry(o, &net->rules_ops, list) if (ops->family == o->family) goto errout; list_add_tail_rcu(&ops->list, &net->rules_ops); err = 0; errout: spin_unlock(&net->rules_mod_lock); return err; } struct fib_rules_ops * fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net) { struct fib_rules_ops *ops; int err; ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); if (ops == NULL) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&ops->rules_list); ops->fro_net = net; err = __fib_rules_register(ops); if (err) { kfree(ops); ops = ERR_PTR(err); } return ops; } EXPORT_SYMBOL_GPL(fib_rules_register); static void fib_rules_cleanup_ops(struct fib_rules_ops *ops) { struct fib_rule *rule, *tmp; list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) { list_del_rcu(&rule->list); if (ops->delete) ops->delete(rule); fib_rule_put(rule); } } void fib_rules_unregister(struct fib_rules_ops *ops) { struct net *net = ops->fro_net;
spin_lock(&net->rules_mod_lock); list_del_rcu(&ops->list); spin_unlock(&net->rules_mod_lock); fib_rules_cleanup_ops(ops); kfree_rcu(ops, rcu); } EXPORT_SYMBOL_GPL(fib_rules_unregister); static int uid_range_set(struct fib_kuid_range *range) { return uid_valid(range->start) && uid_valid(range->end); } static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb) { struct fib_rule_uid_range *in; struct fib_kuid_range out; in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]); out.start = make_kuid(current_user_ns(), in->start); out.end = make_kuid(current_user_ns(), in->end); return out; } static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range) { struct fib_rule_uid_range out = { from_kuid_munged(current_user_ns(), range->start), from_kuid_munged(current_user_ns(), range->end) }; return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out); } static int nla_get_port_range(struct nlattr *pattr, struct fib_rule_port_range *port_range) { const struct fib_rule_port_range *pr = nla_data(pattr); if (!fib_rule_port_range_valid(pr)) return -EINVAL; port_range->start = pr->start; port_range->end = pr->end; return 0; } static int nla_put_port_range(struct sk_buff *skb, int attrtype, struct fib_rule_port_range *range) { return nla_put(skb, attrtype, sizeof(*range), range); } static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, struct flowi *fl, int flags, struct fib_lookup_arg *arg) { int ret = 0; if (rule->iifindex && (rule->iifindex != fl->flowi_iif)) goto out; if (rule->oifindex && (rule->oifindex != fl->flowi_oif)) goto out; if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask) goto out; if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id)) goto out; if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg)) goto out; if (uid_lt(fl->flowi_uid, rule->uid_range.start) || uid_gt(fl->flowi_uid, rule->uid_range.end)) goto out; ret = INDIRECT_CALL_MT(ops->match, fib6_rule_match, fib4_rule_match, rule, fl, flags); out: return (rule->flags & FIB_RULE_INVERT) ? 
!ret : ret; } int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, int flags, struct fib_lookup_arg *arg) { struct fib_rule *rule; int err; rcu_read_lock(); list_for_each_entry_rcu(rule, &ops->rules_list, list) { jumped: if (!fib_rule_match(rule, ops, fl, flags, arg)) continue; if (rule->action == FR_ACT_GOTO) { struct fib_rule *target; target = rcu_dereference(rule->ctarget); if (target == NULL) { continue; } else { rule = target; goto jumped; } } else if (rule->action == FR_ACT_NOP) continue; else err = INDIRECT_CALL_MT(ops->action, fib6_rule_action, fib4_rule_action, rule, fl, flags, arg); if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress, fib6_rule_suppress, fib4_rule_suppress, rule, flags, arg)) continue; if (err != -EAGAIN) { if ((arg->flags & FIB_LOOKUP_NOREF) || likely(refcount_inc_not_zero(&rule->refcnt))) { arg->rule = rule; goto out; } break; } } err = -ESRCH; out: rcu_read_unlock(); return err; } EXPORT_SYMBOL_GPL(fib_rules_lookup); static int call_fib_rule_notifier(struct notifier_block *nb, enum fib_event_type event_type, struct fib_rule *rule, int family, struct netlink_ext_ack *extack) { struct fib_rule_notifier_info info = { .info.family = family, .info.extack = extack, .rule = rule, }; return call_fib_notifier(nb, event_type, &info.info); } static int call_fib_rule_notifiers(struct net *net, enum fib_event_type event_type, struct fib_rule *rule, struct fib_rules_ops *ops, struct netlink_ext_ack *extack) { struct fib_rule_notifier_info info = { .info.family = ops->family, .info.extack = extack, .rule = rule, }; ops->fib_rules_seq++; return call_fib_notifiers(net, event_type, &info.info); } /* Called with rcu_read_lock() */ int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, struct netlink_ext_ack *extack) { struct fib_rules_ops *ops; struct fib_rule *rule; int err = 0; ops = lookup_rules_ops(net, family); if (!ops) return -EAFNOSUPPORT; list_for_each_entry_rcu(rule, &ops->rules_list, list) { err = call_fib_rule_notifier(nb, FIB_EVENT_RULE_ADD, rule, family, extack); if (err) break; } rules_ops_put(ops); return err; } EXPORT_SYMBOL_GPL(fib_rules_dump); unsigned int fib_rules_seq_read(struct net *net, int family) { unsigned int fib_rules_seq; struct fib_rules_ops *ops; ASSERT_RTNL(); ops = lookup_rules_ops(net, family); if (!ops) return 0; fib_rules_seq = ops->fib_rules_seq; rules_ops_put(ops); return fib_rules_seq; } EXPORT_SYMBOL_GPL(fib_rules_seq_read); static struct fib_rule *rule_find(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, struct nlattr **tb, struct fib_rule *rule, bool user_priority) { struct fib_rule *r; list_for_each_entry(r, &ops->rules_list, list) { if (rule->action && r->action != rule->action) continue; if (rule->table && r->table != rule->table) continue; if (user_priority && r->pref != rule->pref) continue; if (rule->iifname[0] && memcmp(r->iifname, rule->iifname, IFNAMSIZ)) continue; if (rule->oifname[0] && memcmp(r->oifname, rule->oifname, IFNAMSIZ)) continue; if (rule->mark && r->mark != rule->mark) continue; if (rule->suppress_ifgroup != -1 && r->suppress_ifgroup != rule->suppress_ifgroup) continue; if (rule->suppress_prefixlen != -1 && r->suppress_prefixlen != rule->suppress_prefixlen) continue; if (rule->mark_mask && r->mark_mask != rule->mark_mask) continue; if (rule->tun_id && r->tun_id != rule->tun_id) continue; if (r->fr_net != rule->fr_net) continue; if (rule->l3mdev && r->l3mdev != rule->l3mdev) continue; if (uid_range_set(&rule->uid_range) && (!uid_eq(r->uid_range.start, 
rule->uid_range.start) || !uid_eq(r->uid_range.end, rule->uid_range.end))) continue; if (rule->ip_proto && r->ip_proto != rule->ip_proto) continue; if (rule->proto && r->proto != rule->proto) continue; if (fib_rule_port_range_set(&rule->sport_range) && !fib_rule_port_range_compare(&r->sport_range, &rule->sport_range)) continue; if (fib_rule_port_range_set(&rule->dport_range) && !fib_rule_port_range_compare(&r->dport_range, &rule->dport_range)) continue; if (!ops->compare(r, frh, tb)) continue; return r; } return NULL; } #ifdef CONFIG_NET_L3_MASTER_DEV static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule, struct netlink_ext_ack *extack) { nlrule->l3mdev = nla_get_u8(nla); if (nlrule->l3mdev != 1) { NL_SET_ERR_MSG(extack, "Invalid l3mdev attribute"); return -1; } return 0; } #else static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "l3mdev support is not enabled in kernel"); return -1; } #endif static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct fib_rules_ops *ops, struct nlattr *tb[], struct fib_rule **rule, bool *user_priority) { struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rule *nlrule = NULL; int err = -EINVAL; if (frh->src_len) if (!tb[FRA_SRC] || frh->src_len > (ops->addr_size * 8) || nla_len(tb[FRA_SRC]) != ops->addr_size) { NL_SET_ERR_MSG(extack, "Invalid source address"); goto errout; } if (frh->dst_len) if (!tb[FRA_DST] || frh->dst_len > (ops->addr_size * 8) || nla_len(tb[FRA_DST]) != ops->addr_size) { NL_SET_ERR_MSG(extack, "Invalid dst address"); goto errout; } nlrule = kzalloc(ops->rule_size, GFP_KERNEL_ACCOUNT); if (!nlrule) { err = -ENOMEM; goto errout; } refcount_set(&nlrule->refcnt, 1); nlrule->fr_net = net; if (tb[FRA_PRIORITY]) { nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]); *user_priority = true; } else { nlrule->pref = fib_default_rule_pref(ops); } nlrule->proto = tb[FRA_PROTOCOL] ? nla_get_u8(tb[FRA_PROTOCOL]) : RTPROT_UNSPEC; if (tb[FRA_IIFNAME]) { struct net_device *dev; nlrule->iifindex = -1; nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ); dev = __dev_get_by_name(net, nlrule->iifname); if (dev) nlrule->iifindex = dev->ifindex; } if (tb[FRA_OIFNAME]) { struct net_device *dev; nlrule->oifindex = -1; nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ); dev = __dev_get_by_name(net, nlrule->oifname); if (dev) nlrule->oifindex = dev->ifindex; } if (tb[FRA_FWMARK]) { nlrule->mark = nla_get_u32(tb[FRA_FWMARK]); if (nlrule->mark) /* compatibility: if the mark value is non-zero all bits * are compared unless a mask is explicitly specified. 
*/ nlrule->mark_mask = 0xFFFFFFFF; } if (tb[FRA_FWMASK]) nlrule->mark_mask = nla_get_u32(tb[FRA_FWMASK]); if (tb[FRA_TUN_ID]) nlrule->tun_id = nla_get_be64(tb[FRA_TUN_ID]); if (tb[FRA_L3MDEV] && fib_nl2rule_l3mdev(tb[FRA_L3MDEV], nlrule, extack) < 0) goto errout_free; nlrule->action = frh->action; nlrule->flags = frh->flags; nlrule->table = frh_get_table(frh, tb); if (tb[FRA_SUPPRESS_PREFIXLEN]) nlrule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]); else nlrule->suppress_prefixlen = -1; if (tb[FRA_SUPPRESS_IFGROUP]) nlrule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]); else nlrule->suppress_ifgroup = -1; if (tb[FRA_GOTO]) { if (nlrule->action != FR_ACT_GOTO) { NL_SET_ERR_MSG(extack, "Unexpected goto"); goto errout_free; } nlrule->target = nla_get_u32(tb[FRA_GOTO]); /* Backward jumps are prohibited to avoid endless loops */ if (nlrule->target <= nlrule->pref) { NL_SET_ERR_MSG(extack, "Backward goto not supported"); goto errout_free; } } else if (nlrule->action == FR_ACT_GOTO) { NL_SET_ERR_MSG(extack, "Missing goto target for action goto"); goto errout_free; } if (nlrule->l3mdev && nlrule->table) { NL_SET_ERR_MSG(extack, "l3mdev and table are mutually exclusive"); goto errout_free; } if (tb[FRA_UID_RANGE]) { if (current_user_ns() != net->user_ns) { err = -EPERM; NL_SET_ERR_MSG(extack, "No permission to set uid"); goto errout_free; } nlrule->uid_range = nla_get_kuid_range(tb); if (!uid_range_set(&nlrule->uid_range) || !uid_lte(nlrule->uid_range.start, nlrule->uid_range.end)) { NL_SET_ERR_MSG(extack, "Invalid uid range"); goto errout_free; } } else { nlrule->uid_range = fib_kuid_range_unset; } if (tb[FRA_IP_PROTO]) nlrule->ip_proto = nla_get_u8(tb[FRA_IP_PROTO]); if (tb[FRA_SPORT_RANGE]) { err = nla_get_port_range(tb[FRA_SPORT_RANGE], &nlrule->sport_range); if (err) { NL_SET_ERR_MSG(extack, "Invalid sport range"); goto errout_free; } } if (tb[FRA_DPORT_RANGE]) { err = nla_get_port_range(tb[FRA_DPORT_RANGE], &nlrule->dport_range); if (err) { NL_SET_ERR_MSG(extack, "Invalid dport range"); goto errout_free; } } *rule = nlrule; return 0; errout_free: kfree(nlrule); errout: return err; } static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, struct nlattr **tb, struct fib_rule *rule) { struct fib_rule *r; list_for_each_entry(r, &ops->rules_list, list) { if (r->action != rule->action) continue; if (r->table != rule->table) continue; if (r->pref != rule->pref) continue; if (memcmp(r->iifname, rule->iifname, IFNAMSIZ)) continue; if (memcmp(r->oifname, rule->oifname, IFNAMSIZ)) continue; if (r->mark != rule->mark) continue; if (r->suppress_ifgroup != rule->suppress_ifgroup) continue; if (r->suppress_prefixlen != rule->suppress_prefixlen) continue; if (r->mark_mask != rule->mark_mask) continue; if (r->tun_id != rule->tun_id) continue; if (r->fr_net != rule->fr_net) continue; if (r->l3mdev != rule->l3mdev) continue; if (!uid_eq(r->uid_range.start, rule->uid_range.start) || !uid_eq(r->uid_range.end, rule->uid_range.end)) continue; if (r->ip_proto != rule->ip_proto) continue; if (r->proto != rule->proto) continue; if (!fib_rule_port_range_compare(&r->sport_range, &rule->sport_range)) continue; if (!fib_rule_port_range_compare(&r->dport_range, &rule->dport_range)) continue; if (!ops->compare(r, frh, tb)) continue; return 1; } return 0; } static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = { [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 }, [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, [FRA_OIFNAME] = { .type = NLA_STRING, 
.len = IFNAMSIZ - 1 }, [FRA_PRIORITY] = { .type = NLA_U32 }, [FRA_FWMARK] = { .type = NLA_U32 }, [FRA_FLOW] = { .type = NLA_U32 }, [FRA_TUN_ID] = { .type = NLA_U64 }, [FRA_FWMASK] = { .type = NLA_U32 }, [FRA_TABLE] = { .type = NLA_U32 }, [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, [FRA_GOTO] = { .type = NLA_U32 }, [FRA_L3MDEV] = { .type = NLA_U8 }, [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) }, [FRA_PROTOCOL] = { .type = NLA_U8 }, [FRA_IP_PROTO] = { .type = NLA_U8 }, [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }, [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) } }; int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; struct fib_rule *rule = NULL, *r, *last = NULL; struct nlattr *tb[FRA_MAX + 1]; int err = -EINVAL, unresolved = 0; bool user_priority = false; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { NL_SET_ERR_MSG(extack, "Invalid msg length"); goto errout; } ops = lookup_rules_ops(net, frh->family); if (!ops) { err = -EAFNOSUPPORT; NL_SET_ERR_MSG(extack, "Rule family not supported"); goto errout; } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing msg"); goto errout; } err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority); if (err) goto errout; if ((nlh->nlmsg_flags & NLM_F_EXCL) && rule_exists(ops, frh, tb, rule)) { err = -EEXIST; goto errout_free; } err = ops->configure(rule, skb, frh, tb, extack); if (err < 0) goto errout_free; err = call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops, extack); if (err < 0) goto errout_free; list_for_each_entry(r, &ops->rules_list, list) { if (r->pref == rule->target) { RCU_INIT_POINTER(rule->ctarget, r); break; } } if (rcu_dereference_protected(rule->ctarget, 1) == NULL) unresolved = 1; list_for_each_entry(r, &ops->rules_list, list) { if (r->pref > rule->pref) break; last = r; } if (last) list_add_rcu(&rule->list, &last->list); else list_add_rcu(&rule->list, &ops->rules_list); if (ops->unresolved_rules) { /* * There are unresolved goto rules in the list, check if * any of them are pointing to this new rule. 
*/ list_for_each_entry(r, &ops->rules_list, list) { if (r->action == FR_ACT_GOTO && r->target == rule->pref && rtnl_dereference(r->ctarget) == NULL) { rcu_assign_pointer(r->ctarget, rule); if (--ops->unresolved_rules == 0) break; } } } if (rule->action == FR_ACT_GOTO) ops->nr_goto_rules++; if (unresolved) ops->unresolved_rules++; if (rule->tun_id) ip_tunnel_need_metadata(); notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid); flush_route_cache(ops); rules_ops_put(ops); return 0; errout_free: kfree(rule); errout: rules_ops_put(ops); return err; } EXPORT_SYMBOL_GPL(fib_nl_newrule); int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; struct fib_rule *rule = NULL, *r, *nlrule = NULL; struct nlattr *tb[FRA_MAX+1]; int err = -EINVAL; bool user_priority = false; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { NL_SET_ERR_MSG(extack, "Invalid msg length"); goto errout; } ops = lookup_rules_ops(net, frh->family); if (ops == NULL) { err = -EAFNOSUPPORT; NL_SET_ERR_MSG(extack, "Rule family not supported"); goto errout; } err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX, fib_rule_policy, extack); if (err < 0) { NL_SET_ERR_MSG(extack, "Error parsing msg"); goto errout; } err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority); if (err) goto errout; rule = rule_find(ops, frh, tb, nlrule, user_priority); if (!rule) { err = -ENOENT; goto errout; } if (rule->flags & FIB_RULE_PERMANENT) { err = -EPERM; goto errout; } if (ops->delete) { err = ops->delete(rule); if (err) goto errout; } if (rule->tun_id) ip_tunnel_unneed_metadata(); list_del_rcu(&rule->list); if (rule->action == FR_ACT_GOTO) { ops->nr_goto_rules--; if (rtnl_dereference(rule->ctarget) == NULL) ops->unresolved_rules--; } /* * Check if this rule is a target to any of them. If so, * adjust to the next one with the same preference or * disable them. As this operation is eventually very * expensive, it is only performed if goto rules, except * current if it is goto rule, have actually been added. 
*/ if (ops->nr_goto_rules > 0) { struct fib_rule *n; n = list_next_entry(rule, list); if (&n->list == &ops->rules_list || n->pref != rule->pref) n = NULL; list_for_each_entry(r, &ops->rules_list, list) { if (rtnl_dereference(r->ctarget) != rule) continue; rcu_assign_pointer(r->ctarget, n); if (!n) ops->unresolved_rules++; } } call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops, NULL); notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid); fib_rule_put(rule); flush_route_cache(ops); rules_ops_put(ops); kfree(nlrule); return 0; errout: kfree(nlrule); rules_ops_put(ops); return err; } EXPORT_SYMBOL_GPL(fib_nl_delrule); static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, struct fib_rule *rule) { size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)) + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */ + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */ + nla_total_size(4) /* FRA_PRIORITY */ + nla_total_size(4) /* FRA_TABLE */ + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */ + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */ + nla_total_size(4) /* FRA_FWMARK */ + nla_total_size(4) /* FRA_FWMASK */ + nla_total_size_64bit(8) /* FRA_TUN_ID */ + nla_total_size(sizeof(struct fib_kuid_range)) + nla_total_size(1) /* FRA_PROTOCOL */ + nla_total_size(1) /* FRA_IP_PROTO */ + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */ + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */ if (ops->nlmsg_payload) payload += ops->nlmsg_payload(rule); return payload; } static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, u32 pid, u32 seq, int type, int flags, struct fib_rules_ops *ops) { struct nlmsghdr *nlh; struct fib_rule_hdr *frh; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags); if (nlh == NULL) return -EMSGSIZE; frh = nlmsg_data(nlh); frh->family = ops->family; frh->table = rule->table < 256 ? 
rule->table : RT_TABLE_COMPAT; if (nla_put_u32(skb, FRA_TABLE, rule->table)) goto nla_put_failure; if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen)) goto nla_put_failure; frh->res1 = 0; frh->res2 = 0; frh->action = rule->action; frh->flags = rule->flags; if (nla_put_u8(skb, FRA_PROTOCOL, rule->proto)) goto nla_put_failure; if (rule->action == FR_ACT_GOTO && rcu_access_pointer(rule->ctarget) == NULL) frh->flags |= FIB_RULE_UNRESOLVED; if (rule->iifname[0]) { if (nla_put_string(skb, FRA_IIFNAME, rule->iifname)) goto nla_put_failure; if (rule->iifindex == -1) frh->flags |= FIB_RULE_IIF_DETACHED; } if (rule->oifname[0]) { if (nla_put_string(skb, FRA_OIFNAME, rule->oifname)) goto nla_put_failure; if (rule->oifindex == -1) frh->flags |= FIB_RULE_OIF_DETACHED; } if ((rule->pref && nla_put_u32(skb, FRA_PRIORITY, rule->pref)) || (rule->mark && nla_put_u32(skb, FRA_FWMARK, rule->mark)) || ((rule->mark_mask || rule->mark) && nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) || (rule->target && nla_put_u32(skb, FRA_GOTO, rule->target)) || (rule->tun_id && nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) || (rule->l3mdev && nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) || (uid_range_set(&rule->uid_range) && nla_put_uid_range(skb, &rule->uid_range)) || (fib_rule_port_range_set(&rule->sport_range) && nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) || (fib_rule_port_range_set(&rule->dport_range) && nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) || (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto))) goto nla_put_failure; if (rule->suppress_ifgroup != -1) { if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup)) goto nla_put_failure; } if (ops->fill(rule, skb, frh) < 0) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, struct fib_rules_ops *ops) { int idx = 0; struct fib_rule *rule; int err = 0; rcu_read_lock(); list_for_each_entry_rcu(rule, &ops->rules_list, list) { if (idx < cb->args[1]) goto skip; err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWRULE, NLM_F_MULTI, ops); if (err) break; skip: idx++; } rcu_read_unlock(); cb->args[1] = idx; rules_ops_put(ops); return err; } static int fib_valid_dumprule_req(const struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct fib_rule_hdr *frh; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { NL_SET_ERR_MSG(extack, "Invalid header for fib rule dump request"); return -EINVAL; } frh = nlmsg_data(nlh); if (frh->dst_len || frh->src_len || frh->tos || frh->table || frh->res1 || frh->res2 || frh->action || frh->flags) { NL_SET_ERR_MSG(extack, "Invalid values in header for fib rule dump request"); return -EINVAL; } if (nlmsg_attrlen(nlh, sizeof(*frh))) { NL_SET_ERR_MSG(extack, "Invalid data after header in fib rule dump request"); return -EINVAL; } return 0; } static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) { const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct fib_rules_ops *ops; int idx = 0, family; if (cb->strict_check) { int err = fib_valid_dumprule_req(nlh, cb->extack); if (err < 0) return err; } family = rtnl_msg_family(nlh); if (family != AF_UNSPEC) { /* Protocol specific dump request */ ops = lookup_rules_ops(net, family); if (ops == NULL) return -EAFNOSUPPORT; dump_rules(skb, cb, ops); return skb->len; } rcu_read_lock(); 
list_for_each_entry_rcu(ops, &net->rules_ops, list) { if (idx < cb->args[0] || !try_module_get(ops->owner)) goto skip; if (dump_rules(skb, cb, ops) < 0) break; cb->args[1] = 0; skip: idx++; } rcu_read_unlock(); cb->args[0] = idx; return skb->len; } static void notify_rule_change(int event, struct fib_rule *rule, struct fib_rules_ops *ops, struct nlmsghdr *nlh, u32 pid) { struct net *net; struct sk_buff *skb; int err = -ENOMEM; net = ops->fro_net; skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL); if (skb == NULL) goto errout; err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops); if (err < 0) { /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(net, ops->nlgroup, err); } static void attach_rules(struct list_head *rules, struct net_device *dev) { struct fib_rule *rule; list_for_each_entry(rule, rules, list) { if (rule->iifindex == -1 && strcmp(dev->name, rule->iifname) == 0) rule->iifindex = dev->ifindex; if (rule->oifindex == -1 && strcmp(dev->name, rule->oifname) == 0) rule->oifindex = dev->ifindex; } } static void detach_rules(struct list_head *rules, struct net_device *dev) { struct fib_rule *rule; list_for_each_entry(rule, rules, list) { if (rule->iifindex == dev->ifindex) rule->iifindex = -1; if (rule->oifindex == dev->ifindex) rule->oifindex = -1; } } static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct fib_rules_ops *ops; ASSERT_RTNL(); switch (event) { case NETDEV_REGISTER: list_for_each_entry(ops, &net->rules_ops, list) attach_rules(&ops->rules_list, dev); break; case NETDEV_CHANGENAME: list_for_each_entry(ops, &net->rules_ops, list) { detach_rules(&ops->rules_list, dev); attach_rules(&ops->rules_list, dev); } break; case NETDEV_UNREGISTER: list_for_each_entry(ops, &net->rules_ops, list) detach_rules(&ops->rules_list, dev); break; } return NOTIFY_DONE; } static struct notifier_block fib_rules_notifier = { .notifier_call = fib_rules_event, }; static int __net_init fib_rules_net_init(struct net *net) { INIT_LIST_HEAD(&net->rules_ops); spin_lock_init(&net->rules_mod_lock); return 0; } static void __net_exit fib_rules_net_exit(struct net *net) { WARN_ON_ONCE(!list_empty(&net->rules_ops)); } static struct pernet_operations fib_rules_net_ops = { .init = fib_rules_net_init, .exit = fib_rules_net_exit, }; static int __init fib_rules_init(void) { int err; rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0); rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0); err = register_pernet_subsys(&fib_rules_net_ops); if (err < 0) goto fail; err = register_netdevice_notifier(&fib_rules_notifier); if (err < 0) goto fail_unregister; return 0; fail_unregister: unregister_pernet_subsys(&fib_rules_net_ops); fail: rtnl_unregister(PF_UNSPEC, RTM_NEWRULE); rtnl_unregister(PF_UNSPEC, RTM_DELRULE); rtnl_unregister(PF_UNSPEC, RTM_GETRULE); return err; } subsys_initcall(fib_rules_init); |
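/*
 * Usage sketch (not part of fib_rules.c above): a minimal userspace program
 * that exercises the fib_nl_newrule()/fib_nl2rule() path by sending an
 * RTM_NEWRULE message with FRA_PRIORITY and FRA_TABLE over NETLINK_ROUTE.
 * The priority (1000) and table id (100) are illustrative values only, and
 * error handling is reduced to perror() for brevity. NLM_F_EXCL makes the
 * kernel run the rule_exists() check and return -EEXIST for duplicates.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/fib_rules.h>

struct rule_req {
	struct nlmsghdr		nlh;
	struct fib_rule_hdr	frh;
	char			attrs[64];	/* room for a few rtattrs */
};

static void add_u32_attr(struct nlmsghdr *nlh, unsigned short type, __u32 value)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(value));
	memcpy(RTA_DATA(rta), &value, sizeof(value));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rule_req req;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.frh));
	req.nlh.nlmsg_type = RTM_NEWRULE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;

	/* IPv4 rule whose action is "look up table 100" */
	req.frh.family = AF_INET;
	req.frh.action = FR_ACT_TO_TBL;

	/* FRA_PRIORITY makes fib_nl2rule() set user_priority; FRA_TABLE is
	 * used because table ids >= 256 are echoed back as RT_TABLE_COMPAT
	 * in the fib_rule_hdr, so the attribute carries the real id. */
	add_u32_attr(&req.nlh, FRA_PRIORITY, 1000);
	add_u32_attr(&req.nlh, FRA_TABLE, 100);

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}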
// SPDX-License-Identifier: GPL-2.0-only
/* module that allows mangling of the arp payload */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp/arpt_mangle.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("arptables arp payload mangle target");

static unsigned int
target(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct arpt_mangle *mangle = par->targinfo;
	const struct arphdr *arp;
	unsigned char *arpptr;
	int pln, hln;

	if (skb_ensure_writable(skb, skb->len))
		return NF_DROP;

	arp = arp_hdr(skb);
	arpptr = skb_network_header(skb) + sizeof(*arp);
	pln = arp->ar_pln;
	hln = arp->ar_hln;

	/* We assume that pln and hln were checked in the match */
	if (mangle->flags & ARPT_MANGLE_SDEV) {
		if (ARPT_DEV_ADDR_LEN_MAX < hln ||
		   (arpptr + hln > skb_tail_pointer(skb)))
			return NF_DROP;
		memcpy(arpptr, mangle->src_devaddr, hln);
	}
	arpptr += hln;
	if (mangle->flags & ARPT_MANGLE_SIP) {
		if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
		   (arpptr + pln > skb_tail_pointer(skb)))
			return NF_DROP;
		memcpy(arpptr, &mangle->u_s.src_ip, pln);
	}
	arpptr += pln;
	if (mangle->flags & ARPT_MANGLE_TDEV) {
		if (ARPT_DEV_ADDR_LEN_MAX < hln ||
		   (arpptr + hln > skb_tail_pointer(skb)))
			return NF_DROP;
		memcpy(arpptr, mangle->tgt_devaddr, hln);
	}
	arpptr += hln;
	if (mangle->flags & ARPT_MANGLE_TIP) {
		if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
		   (arpptr + pln > skb_tail_pointer(skb)))
			return NF_DROP;
		memcpy(arpptr, &mangle->u_t.tgt_ip, pln);
	}
	return mangle->target;
}

static int checkentry(const struct xt_tgchk_param *par)
{
	const struct arpt_mangle *mangle = par->targinfo;

	if (mangle->flags & ~ARPT_MANGLE_MASK ||
	    !(mangle->flags & ARPT_MANGLE_MASK))
		return -EINVAL;

	if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
	    mangle->target != XT_CONTINUE)
		return -EINVAL;
	return 0;
}

static struct xt_target arpt_mangle_reg __read_mostly = {
	.name		= "mangle",
	.family		= NFPROTO_ARP,
	.target		= target,
	.targetsize	= sizeof(struct arpt_mangle),
	.checkentry	= checkentry,
	.me		= THIS_MODULE,
};

static int __init arpt_mangle_init(void)
{
	return xt_register_target(&arpt_mangle_reg);
}

static void __exit arpt_mangle_fini(void)
{
	xt_unregister_target(&arpt_mangle_reg);
}

module_init(arpt_mangle_init);
module_exit(arpt_mangle_fini);
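/*
 * Illustrative sketch, not part of the module above: in the common
 * Ethernet/IPv4 case (ar_hln == 6, ar_pln == 4) the arpptr walk in
 * target() steps through the payload below, which follows struct arphdr
 * on the wire. The struct is only a visual aid; the module itself works
 * from ar_hln/ar_pln and is not tied to these sizes. With the arptables
 * tool, an option such as --mangle-ip-s (rewrite the sender IP) is what
 * ends up setting ARPT_MANGLE_SIP in struct arpt_mangle.
 */
struct arp_eth_ipv4_payload {
	unsigned char	sha[6];	/* sender hardware address: ARPT_MANGLE_SDEV */
	unsigned char	spa[4];	/* sender protocol address: ARPT_MANGLE_SIP  */
	unsigned char	tha[6];	/* target hardware address: ARPT_MANGLE_TDEV */
	unsigned char	tpa[4];	/* target protocol address: ARPT_MANGLE_TIP  */
} __attribute__((packed));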
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#define HAVE_JUMP_LABEL_BATCH

#include <asm/asm.h>
#include <asm/nops.h>

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_TABLE_ENTRY				\
	".pushsection __jump_table, \"aw\" \n\t"	\
	_ASM_ALIGN "\n\t"				\
	".long 1b - . \n\t"				\
	".long %l[l_yes] - . \n\t"			\
	_ASM_PTR "%c0 + %c1 - .\n\t"			\
	".popsection \n\t"

#ifdef CONFIG_HAVE_JUMP_LABEL_HACK

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
	asm goto("1:"
		"jmp %l[l_yes] # objtool NOPs this \n\t"
		JUMP_TABLE_ENTRY
		: : "i" (key), "i" (2 | branch) : : l_yes);

	return false;
l_yes:
	return true;
}

#else /* !CONFIG_HAVE_JUMP_LABEL_HACK */

static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
	asm goto("1:"
		".byte " __stringify(BYTES_NOP5) "\n\t"
		JUMP_TABLE_ENTRY
		: : "i" (key), "i" (branch) : : l_yes);

	return false;
l_yes:
	return true;
}

#endif /* CONFIG_HAVE_JUMP_LABEL_HACK */

static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
	asm goto("1:"
		"jmp %l[l_yes]\n\t"
		JUMP_TABLE_ENTRY
		: : "i" (key), "i" (branch) : : l_yes);

	return false;
l_yes:
	return true;
}

extern int arch_jump_entry_size(struct jump_entry *entry);

#endif /* __ASSEMBLY__ */

#endif
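/*
 * Usage sketch, not part of the header above: how generic kernel code ends
 * up in arch_static_branch()/arch_static_branch_jump(). The key and the
 * functions below (my_feature_key, my_fast_path, my_feature_enable) are
 * made-up names for illustration; the static key API itself is the regular
 * <linux/jump_label.h> one.
 */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_fast_path(void)
{
	/* Compiles down to the NOP emitted by arch_static_branch() above:
	 * no memory load and no compare on the disabled path. */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("slow path: feature enabled\n");
}

void my_feature_enable(void)
{
	/* Rewrites the NOP into the jump recorded in __jump_table. */
	static_branch_enable(&my_feature_key);
}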
913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 | // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** ******************************************************************************* ******************************************************************************/ #include <linux/module.h> #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "recoverd.h" #include "dir.h" #include "midcomms.h" #include "config.h" #include "memory.h" #include "lock.h" #include "recover.h" #include "requestqueue.h" #include "user.h" #include "ast.h" static int ls_count; static struct mutex ls_lock; static struct list_head lslist; static spinlock_t lslist_lock; static struct task_struct * scand_task; static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) { ssize_t ret = len; int n; int rc = kstrtoint(buf, 0, &n); if (rc) return rc; ls = dlm_find_lockspace_local(ls->ls_local_handle); if (!ls) return -EINVAL; switch (n) { case 0: dlm_ls_stop(ls); break; case 1: dlm_ls_start(ls); break; default: ret = -EINVAL; } dlm_put_lockspace(ls); return ret; } static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len) { int rc = kstrtoint(buf, 0, &ls->ls_uevent_result); if (rc) return rc; set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags); wake_up(&ls->ls_uevent_wait); return len; } static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id); } static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len) { int rc = kstrtouint(buf, 0, &ls->ls_global_id); if (rc) return rc; return len; } static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls)); } static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len) { int val; int rc = kstrtoint(buf, 0, &val); if (rc) return rc; if (val == 1) set_bit(LSFL_NODIR, &ls->ls_flags); return len; } static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf) { uint32_t status = dlm_recover_status(ls); return snprintf(buf, PAGE_SIZE, "%x\n", status); } static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf) { return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid); } struct dlm_attr { struct attribute attr; ssize_t (*show)(struct dlm_ls *, char *); ssize_t (*store)(struct dlm_ls *, const char *, size_t); }; static struct dlm_attr dlm_attr_control = { .attr = {.name = "control", .mode = S_IWUSR}, .store = dlm_control_store }; static struct dlm_attr dlm_attr_event = { .attr = {.name = "event_done", .mode = S_IWUSR}, .store = dlm_event_store }; static struct dlm_attr dlm_attr_id = { .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR}, .show = dlm_id_show, .store = dlm_id_store }; static struct dlm_attr dlm_attr_nodir = { .attr = {.name = "nodir", .mode = S_IRUGO | S_IWUSR}, .show = dlm_nodir_show, .store = dlm_nodir_store }; static struct dlm_attr dlm_attr_recover_status = { .attr = {.name = "recover_status", .mode = S_IRUGO}, .show = dlm_recover_status_show }; static struct dlm_attr dlm_attr_recover_nodeid = { .attr = {.name = "recover_nodeid", .mode = S_IRUGO}, .show = dlm_recover_nodeid_show }; static struct attribute 
*dlm_attrs[] = { &dlm_attr_control.attr, &dlm_attr_event.attr, &dlm_attr_id.attr, &dlm_attr_nodir.attr, &dlm_attr_recover_status.attr, &dlm_attr_recover_nodeid.attr, NULL, }; ATTRIBUTE_GROUPS(dlm); static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); struct dlm_attr *a = container_of(attr, struct dlm_attr, attr); return a->show ? a->show(ls, buf) : 0; } static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); struct dlm_attr *a = container_of(attr, struct dlm_attr, attr); return a->store ? a->store(ls, buf, len) : len; } static void lockspace_kobj_release(struct kobject *k) { struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj); kfree(ls); } static const struct sysfs_ops dlm_attr_ops = { .show = dlm_attr_show, .store = dlm_attr_store, }; static struct kobj_type dlm_ktype = { .default_groups = dlm_groups, .sysfs_ops = &dlm_attr_ops, .release = lockspace_kobj_release, }; static struct kset *dlm_kset; static int do_uevent(struct dlm_ls *ls, int in) { if (in) kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE); else kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving"); /* dlm_controld will see the uevent, do the necessary group management and then write to sysfs to wake us */ wait_event(ls->ls_uevent_wait, test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); log_rinfo(ls, "group event done %d", ls->ls_uevent_result); return ls->ls_uevent_result; } static int dlm_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); return 0; } static const struct kset_uevent_ops dlm_uevent_ops = { .uevent = dlm_uevent, }; int __init dlm_lockspace_init(void) { ls_count = 0; mutex_init(&ls_lock); INIT_LIST_HEAD(&lslist); spin_lock_init(&lslist_lock); dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj); if (!dlm_kset) { printk(KERN_WARNING "%s: can not create kset\n", __func__); return -ENOMEM; } return 0; } void dlm_lockspace_exit(void) { kset_unregister(dlm_kset); } static struct dlm_ls *find_ls_to_scan(void) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (time_after_eq(jiffies, ls->ls_scan_time + dlm_config.ci_scan_secs * HZ)) { spin_unlock(&lslist_lock); return ls; } } spin_unlock(&lslist_lock); return NULL; } static int dlm_scand(void *data) { struct dlm_ls *ls; while (!kthread_should_stop()) { ls = find_ls_to_scan(); if (ls) { if (dlm_lock_recovery_try(ls)) { ls->ls_scan_time = jiffies; dlm_scan_rsbs(ls); dlm_unlock_recovery(ls); } else { ls->ls_scan_time += HZ; } continue; } schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ); } return 0; } static int dlm_scand_start(void) { struct task_struct *p; int error = 0; p = kthread_run(dlm_scand, NULL, "dlm_scand"); if (IS_ERR(p)) error = PTR_ERR(p); else scand_task = p; return error; } static void dlm_scand_stop(void) { kthread_stop(scand_task); } struct dlm_ls *dlm_find_lockspace_global(uint32_t id) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_global_id == id) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) { struct dlm_ls 
*ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_local_handle == lockspace) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } struct dlm_ls *dlm_find_lockspace_device(int minor) { struct dlm_ls *ls; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_device.minor == minor) { atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: spin_unlock(&lslist_lock); return ls; } void dlm_put_lockspace(struct dlm_ls *ls) { if (atomic_dec_and_test(&ls->ls_count)) wake_up(&ls->ls_count_wait); } static void remove_lockspace(struct dlm_ls *ls) { retry: wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); spin_lock(&lslist_lock); if (atomic_read(&ls->ls_count) != 0) { spin_unlock(&lslist_lock); goto retry; } WARN_ON(ls->ls_create_count != 0); list_del(&ls->ls_list); spin_unlock(&lslist_lock); } static int threads_start(void) { int error; /* Thread for sending/receiving messages for all lockspace's */ error = dlm_midcomms_start(); if (error) { log_print("cannot start dlm midcomms %d", error); goto fail; } error = dlm_scand_start(); if (error) { log_print("cannot start dlm_scand thread %d", error); goto midcomms_fail; } return 0; midcomms_fail: dlm_midcomms_stop(); fail: return error; } static int new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { struct dlm_ls *ls; int i, size, error; int do_unreg = 0; int namelen = strlen(name); if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL; if (lvblen % 8) return -EINVAL; if (!try_module_get(THIS_MODULE)) return -EINVAL; if (!dlm_user_daemon_available()) { log_print("dlm user daemon not available"); error = -EUNATCH; goto out; } if (ops && ops_result) { if (!dlm_config.ci_recover_callbacks) *ops_result = -EOPNOTSUPP; else *ops_result = 0; } if (!cluster) log_print("dlm cluster name '%s' is being used without an application provided cluster name", dlm_config.ci_cluster_name); if (dlm_config.ci_recover_callbacks && cluster && strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) { log_print("dlm cluster name '%s' does not match " "the application cluster name '%s'", dlm_config.ci_cluster_name, cluster); error = -EBADR; goto out; } error = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { WARN_ON(ls->ls_create_count <= 0); if (ls->ls_namelen != namelen) continue; if (memcmp(ls->ls_name, name, namelen)) continue; if (flags & DLM_LSFL_NEWEXCL) { error = -EEXIST; break; } ls->ls_create_count++; *lockspace = ls; error = 1; break; } spin_unlock(&lslist_lock); if (error) goto out; error = -ENOMEM; ls = kzalloc(sizeof(*ls), GFP_NOFS); if (!ls) goto out; memcpy(ls->ls_name, name, namelen); ls->ls_namelen = namelen; ls->ls_lvblen = lvblen; atomic_set(&ls->ls_count, 0); init_waitqueue_head(&ls->ls_count_wait); ls->ls_flags = 0; ls->ls_scan_time = jiffies; if (ops && dlm_config.ci_recover_callbacks) { ls->ls_ops = ops; ls->ls_ops_arg = ops_arg; } /* ls_exflags are forced to match among nodes, and we don't * need to require all nodes to have some flags set */ ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL)); size = READ_ONCE(dlm_config.ci_rsbtbl_size); ls->ls_rsbtbl_size = size; ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable))); if (!ls->ls_rsbtbl) goto out_lsfree; for (i = 0; i < size; i++) { ls->ls_rsbtbl[i].keep.rb_node = NULL; 
ls->ls_rsbtbl[i].toss.rb_node = NULL; spin_lock_init(&ls->ls_rsbtbl[i].lock); } for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, GFP_KERNEL); if (!ls->ls_remove_names[i]) goto out_rsbtbl; } idr_init(&ls->ls_lkbidr); spin_lock_init(&ls->ls_lkbidr_spin); INIT_LIST_HEAD(&ls->ls_waiters); mutex_init(&ls->ls_waiters_mutex); INIT_LIST_HEAD(&ls->ls_orphans); mutex_init(&ls->ls_orphans_mutex); INIT_LIST_HEAD(&ls->ls_new_rsb); spin_lock_init(&ls->ls_new_rsb_spin); INIT_LIST_HEAD(&ls->ls_nodes); INIT_LIST_HEAD(&ls->ls_nodes_gone); ls->ls_num_nodes = 0; ls->ls_low_nodeid = 0; ls->ls_total_weight = 0; ls->ls_node_array = NULL; memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb)); ls->ls_local_rsb.res_ls = ls; ls->ls_debug_rsb_dentry = NULL; ls->ls_debug_waiters_dentry = NULL; init_waitqueue_head(&ls->ls_uevent_wait); ls->ls_uevent_result = 0; init_completion(&ls->ls_recovery_done); ls->ls_recovery_result = -1; spin_lock_init(&ls->ls_cb_lock); INIT_LIST_HEAD(&ls->ls_cb_delay); ls->ls_recoverd_task = NULL; mutex_init(&ls->ls_recoverd_active); spin_lock_init(&ls->ls_recover_lock); spin_lock_init(&ls->ls_rcom_spin); get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); ls->ls_recover_status = 0; ls->ls_recover_seq = get_random_u64(); ls->ls_recover_args = NULL; init_rwsem(&ls->ls_in_recovery); init_rwsem(&ls->ls_recv_active); INIT_LIST_HEAD(&ls->ls_requestqueue); atomic_set(&ls->ls_requestqueue_cnt, 0); init_waitqueue_head(&ls->ls_requestqueue_wait); mutex_init(&ls->ls_requestqueue_mutex); spin_lock_init(&ls->ls_clear_proc_locks); /* Due backwards compatibility with 3.1 we need to use maximum * possible dlm message size to be sure the message will fit and * not having out of bounds issues. However on sending side 3.2 * might send less. */ ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); if (!ls->ls_recover_buf) goto out_lkbidr; ls->ls_slot = 0; ls->ls_num_slots = 0; ls->ls_slots_size = 0; ls->ls_slots = NULL; INIT_LIST_HEAD(&ls->ls_recover_list); spin_lock_init(&ls->ls_recover_list_lock); idr_init(&ls->ls_recover_idr); spin_lock_init(&ls->ls_recover_idr_lock); ls->ls_recover_list_count = 0; ls->ls_local_handle = ls; init_waitqueue_head(&ls->ls_wait_general); INIT_LIST_HEAD(&ls->ls_root_list); init_rwsem(&ls->ls_root_sem); spin_lock(&lslist_lock); ls->ls_create_count = 1; list_add(&ls->ls_list, &lslist); spin_unlock(&lslist_lock); if (flags & DLM_LSFL_FS) { error = dlm_callback_start(ls); if (error) { log_error(ls, "can't start dlm_callback %d", error); goto out_delist; } } init_waitqueue_head(&ls->ls_recover_lock_wait); /* * Once started, dlm_recoverd first looks for ls in lslist, then * initializes ls_in_recovery as locked in "down" mode. We need * to wait for the wakeup from dlm_recoverd because in_recovery * has to start out in down mode. */ error = dlm_recoverd_start(ls); if (error) { log_error(ls, "can't start dlm_recoverd %d", error); goto out_callback; } wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); /* let kobject handle freeing of ls if there's an error */ do_unreg = 1; ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); if (error) goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) 
Once it's done that, it tells us who the current lockspace members are (via configfs) and then tells the lockspace to start running (via sysfs) in dlm_ls_start(). */ error = do_uevent(ls, 1); if (error) goto out_recoverd; /* wait until recovery is successful or failed */ wait_for_completion(&ls->ls_recovery_done); error = ls->ls_recovery_result; if (error) goto out_members; dlm_create_debug_file(ls); log_rinfo(ls, "join complete"); *lockspace = ls; return 0; out_members: do_uevent(ls, 0); dlm_clear_members(ls); kfree(ls->ls_node_array); out_recoverd: dlm_recoverd_stop(ls); out_callback: dlm_callback_stop(ls); out_delist: spin_lock(&lslist_lock); list_del(&ls->ls_list); spin_unlock(&lslist_lock); idr_destroy(&ls->ls_recover_idr); kfree(ls->ls_recover_buf); out_lkbidr: idr_destroy(&ls->ls_lkbidr); out_rsbtbl: for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); vfree(ls->ls_rsbtbl); out_lsfree: if (do_unreg) kobject_put(&ls->ls_kobj); else kfree(ls); out: module_put(THIS_MODULE); return error; } static int __dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { int error = 0; mutex_lock(&ls_lock); if (!ls_count) error = threads_start(); if (error) goto out; error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg, ops_result, lockspace); if (!error) ls_count++; if (error > 0) error = 0; if (!ls_count) { dlm_scand_stop(); dlm_midcomms_shutdown(); dlm_midcomms_stop(); } out: mutex_unlock(&ls_lock); return error; } int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen, ops, ops_arg, ops_result, lockspace); } int dlm_new_user_lockspace(const char *name, const char *cluster, uint32_t flags, int lvblen, const struct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace) { return __dlm_new_lockspace(name, cluster, flags, lvblen, ops, ops_arg, ops_result, lockspace); } static int lkb_idr_is_local(int id, void *p, void *data) { struct dlm_lkb *lkb = p; return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV; } static int lkb_idr_is_any(int id, void *p, void *data) { return 1; } static int lkb_idr_free(int id, void *p, void *data) { struct dlm_lkb *lkb = p; if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) dlm_free_lvb(lkb->lkb_lvbptr); dlm_free_lkb(lkb); return 0; } /* NOTE: We check the lkbidr here rather than the resource table. 
This is because there may be LKBs queued as ASTs that have been unlinked from their RSBs and are pending deletion once the AST has been delivered */ static int lockspace_busy(struct dlm_ls *ls, int force) { int rv; spin_lock(&ls->ls_lkbidr_spin); if (force == 0) { rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); } else if (force == 1) { rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); } else { rv = 0; } spin_unlock(&ls->ls_lkbidr_spin); return rv; } static int release_lockspace(struct dlm_ls *ls, int force) { struct dlm_rsb *rsb; struct rb_node *n; int i, busy, rv; busy = lockspace_busy(ls, force); spin_lock(&lslist_lock); if (ls->ls_create_count == 1) { if (busy) { rv = -EBUSY; } else { /* remove_lockspace takes ls off lslist */ ls->ls_create_count = 0; rv = 0; } } else if (ls->ls_create_count > 1) { rv = --ls->ls_create_count; } else { rv = -EINVAL; } spin_unlock(&lslist_lock); if (rv) { log_debug(ls, "release_lockspace no remove %d", rv); return rv; } if (ls_count == 1) dlm_midcomms_version_wait(); dlm_device_deregister(ls); if (force < 3 && dlm_user_daemon_available()) do_uevent(ls, 0); dlm_recoverd_stop(ls); if (ls_count == 1) { dlm_scand_stop(); dlm_clear_members(ls); dlm_midcomms_shutdown(); } dlm_callback_stop(ls); remove_lockspace(ls); dlm_delete_debug_file(ls); idr_destroy(&ls->ls_recover_idr); kfree(ls->ls_recover_buf); /* * Free all lkb's in idr */ idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); idr_destroy(&ls->ls_lkbidr); /* * Free all rsb's on rsbtbl[] lists */ for (i = 0; i < ls->ls_rsbtbl_size; i++) { while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].keep); dlm_free_rsb(rsb); } while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { rsb = rb_entry(n, struct dlm_rsb, res_hashnode); rb_erase(n, &ls->ls_rsbtbl[i].toss); dlm_free_rsb(rsb); } } vfree(ls->ls_rsbtbl); for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) kfree(ls->ls_remove_names[i]); while (!list_empty(&ls->ls_new_rsb)) { rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); list_del(&rsb->res_hashchain); dlm_free_rsb(rsb); } /* * Free structures on any other lists */ dlm_purge_requestqueue(ls); kfree(ls->ls_recover_args); dlm_clear_members(ls); dlm_clear_members_gone(ls); kfree(ls->ls_node_array); log_rinfo(ls, "release_lockspace final free"); kobject_put(&ls->ls_kobj); /* The ls structure will be freed when the kobject is done with */ module_put(THIS_MODULE); return 0; } /* * Called when a system has released all its locks and is not going to use the * lockspace any longer. We free everything we're managing for this lockspace. * Remaining nodes will go through the recovery process as if we'd died. The * lockspace must continue to function as usual, participating in recoveries, * until this returns. 
* * Force has 4 possible values: * 0 - don't destroy lockspace if it has any LKBs * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs * 2 - destroy lockspace regardless of LKBs * 3 - destroy lockspace as part of a forced shutdown */ int dlm_release_lockspace(void *lockspace, int force) { struct dlm_ls *ls; int error; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; dlm_put_lockspace(ls); mutex_lock(&ls_lock); error = release_lockspace(ls, force); if (!error) ls_count--; if (!ls_count) dlm_midcomms_stop(); mutex_unlock(&ls_lock); return error; } void dlm_stop_lockspaces(void) { struct dlm_ls *ls; int count; restart: count = 0; spin_lock(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { count++; continue; } spin_unlock(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } spin_unlock(&lslist_lock); if (count) log_print("dlm user daemon left %d lockspaces", count); } |
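/*
 * Usage sketch, not part of lockspace.c above: how a kernel caller (e.g. a
 * cluster filesystem) might create and later release a lockspace through the
 * functions defined above. The lockspace name "example-ls" and the 32-byte
 * LVB length are illustrative values, not taken from the source.
 */
#include <linux/dlm.h>

static dlm_lockspace_t *example_ls;

static int example_join(void)
{
	int ops_result;

	/* DLM_LSFL_NEWEXCL: fail with -EEXIST instead of attaching to an
	 * existing lockspace of the same name (see the ls_create_count
	 * handling in new_lockspace() above). */
	return dlm_new_lockspace("example-ls", NULL, DLM_LSFL_NEWEXCL,
				 32 /* lvblen, must be a multiple of 8 */,
				 NULL, NULL, &ops_result, &example_ls);
}

static void example_leave(void)
{
	/* force == 0: refuse to tear down while the lockspace still holds
	 * any LKBs (see lockspace_busy() above). */
	dlm_release_lockspace(example_ls, 0);
}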
849 850 851 852 853 854 855 856 857 858 859 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2002 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_quota.h" #include "xfs_qm.h" #include "xfs_trace.h" #include "xfs_error.h" STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); /* * Add the locked dquot to the transaction. * The dquot must be locked, and it cannot be associated with any * transaction. */ void xfs_trans_dqjoin( struct xfs_trans *tp, struct xfs_dquot *dqp) { ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(dqp->q_logitem.qli_dquot == dqp); /* * Get a log_item_desc to point at the new item. */ xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); } /* * This is called to mark the dquot as needing * to be logged when the transaction is committed. The dquot must * already be associated with the given transaction. * Note that it marks the entire transaction as dirty. In the ordinary * case, this gets called via xfs_trans_commit, after the transaction * is already dirty. However, there's nothing stop this from getting * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY * flag. */ void xfs_trans_log_dquot( struct xfs_trans *tp, struct xfs_dquot *dqp) { ASSERT(XFS_DQ_IS_LOCKED(dqp)); /* Upgrade the dquot to bigtime format if possible. */ if (dqp->q_id != 0 && xfs_has_bigtime(tp->t_mountp) && !(dqp->q_type & XFS_DQTYPE_BIGTIME)) dqp->q_type |= XFS_DQTYPE_BIGTIME; tp->t_flags |= XFS_TRANS_DIRTY; set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags); } /* * Carry forward whatever is left of the quota blk reservation to * the spanky new transaction */ void xfs_trans_dup_dqinfo( struct xfs_trans *otp, struct xfs_trans *ntp) { struct xfs_dqtrx *oq, *nq; int i, j; struct xfs_dqtrx *oqa, *nqa; uint64_t blk_res_used; if (!otp->t_dqinfo) return; xfs_trans_alloc_dqinfo(ntp); for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { oqa = otp->t_dqinfo->dqs[j]; nqa = ntp->t_dqinfo->dqs[j]; for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { blk_res_used = 0; if (oqa[i].qt_dquot == NULL) break; oq = &oqa[i]; nq = &nqa[i]; if (oq->qt_blk_res && oq->qt_bcount_delta > 0) blk_res_used = oq->qt_bcount_delta; nq->qt_dquot = oq->qt_dquot; nq->qt_bcount_delta = nq->qt_icount_delta = 0; nq->qt_rtbcount_delta = 0; /* * Transfer whatever is left of the reservations. */ nq->qt_blk_res = oq->qt_blk_res - blk_res_used; oq->qt_blk_res = blk_res_used; nq->qt_rtblk_res = oq->qt_rtblk_res - oq->qt_rtblk_res_used; oq->qt_rtblk_res = oq->qt_rtblk_res_used; nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; oq->qt_ino_res = oq->qt_ino_res_used; } } } /* * Wrap around mod_dquot to account for both user and group quotas. 
*/ void xfs_trans_mod_dquot_byino( xfs_trans_t *tp, xfs_inode_t *ip, uint field, int64_t delta) { xfs_mount_t *mp = tp->t_mountp; if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) return; if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot) (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta); } STATIC struct xfs_dqtrx * xfs_trans_get_dqtrx( struct xfs_trans *tp, struct xfs_dquot *dqp) { int i; struct xfs_dqtrx *qa; switch (xfs_dquot_type(dqp)) { case XFS_DQTYPE_USER: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR]; break; case XFS_DQTYPE_GROUP: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP]; break; case XFS_DQTYPE_PROJ: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ]; break; default: return NULL; } for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { if (qa[i].qt_dquot == NULL || qa[i].qt_dquot == dqp) return &qa[i]; } return NULL; } /* * Make the changes in the transaction structure. * The moral equivalent to xfs_trans_mod_sb(). * We don't touch any fields in the dquot, so we don't care * if it's locked or not (most of the time it won't be). */ void xfs_trans_mod_dquot( struct xfs_trans *tp, struct xfs_dquot *dqp, uint field, int64_t delta) { struct xfs_dqtrx *qtrx; ASSERT(tp); ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp)); qtrx = NULL; if (!delta) return; if (tp->t_dqinfo == NULL) xfs_trans_alloc_dqinfo(tp); /* * Find either the first free slot or the slot that belongs * to this dquot. */ qtrx = xfs_trans_get_dqtrx(tp, dqp); ASSERT(qtrx); if (qtrx->qt_dquot == NULL) qtrx->qt_dquot = dqp; trace_xfs_trans_mod_dquot_before(qtrx); trace_xfs_trans_mod_dquot(tp, dqp, field, delta); switch (field) { /* regular disk blk reservation */ case XFS_TRANS_DQ_RES_BLKS: qtrx->qt_blk_res += delta; break; /* inode reservation */ case XFS_TRANS_DQ_RES_INOS: qtrx->qt_ino_res += delta; break; /* disk blocks used. */ case XFS_TRANS_DQ_BCOUNT: qtrx->qt_bcount_delta += delta; break; case XFS_TRANS_DQ_DELBCOUNT: qtrx->qt_delbcnt_delta += delta; break; /* Inode Count */ case XFS_TRANS_DQ_ICOUNT: if (qtrx->qt_ino_res && delta > 0) { qtrx->qt_ino_res_used += delta; ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); } qtrx->qt_icount_delta += delta; break; /* rtblk reservation */ case XFS_TRANS_DQ_RES_RTBLKS: qtrx->qt_rtblk_res += delta; break; /* rtblk count */ case XFS_TRANS_DQ_RTBCOUNT: if (qtrx->qt_rtblk_res && delta > 0) { qtrx->qt_rtblk_res_used += delta; ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); } qtrx->qt_rtbcount_delta += delta; break; case XFS_TRANS_DQ_DELRTBCOUNT: qtrx->qt_delrtb_delta += delta; break; default: ASSERT(0); } trace_xfs_trans_mod_dquot_after(qtrx); } /* * Given an array of dqtrx structures, lock all the dquots associated and join * them to the transaction, provided they have been modified. We know that the * highest number of dquots of one type - usr, grp and prj - involved in a * transaction is 3 so we don't need to make this very generic. */ STATIC void xfs_trans_dqlockedjoin( struct xfs_trans *tp, struct xfs_dqtrx *q) { ASSERT(q[0].qt_dquot != NULL); if (q[1].qt_dquot == NULL) { xfs_dqlock(q[0].qt_dquot); xfs_trans_dqjoin(tp, q[0].qt_dquot); } else { ASSERT(XFS_QM_TRANS_MAXDQS == 2); xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); xfs_trans_dqjoin(tp, q[0].qt_dquot); xfs_trans_dqjoin(tp, q[1].qt_dquot); } } /* Apply dqtrx changes to the quota reservation counters. 
*/ static inline void xfs_apply_quota_reservation_deltas( struct xfs_dquot_res *res, uint64_t reserved, int64_t res_used, int64_t count_delta) { if (reserved != 0) { /* * Subtle math here: If reserved > res_used (the normal case), * we're simply subtracting the unused transaction quota * reservation from the dquot reservation. * * If, however, res_used > reserved, then we have allocated * more quota blocks than were reserved for the transaction. * We must add that excess to the dquot reservation since it * tracks (usage + resv) and by definition we didn't reserve * that excess. */ res->reserved -= abs(reserved - res_used); } else if (count_delta != 0) { /* * These blks were never reserved, either inside a transaction * or outside one (in a delayed allocation). Also, this isn't * always a negative number since we sometimes deliberately * skip quota reservations. */ res->reserved += count_delta; } } /* * Called by xfs_trans_commit() and similar in spirit to * xfs_trans_apply_sb_deltas(). * Go thru all the dquots belonging to this transaction and modify the * INCORE dquot to reflect the actual usages. * Unreserve just the reservations done by this transaction. * dquot is still left locked at exit. */ void xfs_trans_apply_dquot_deltas( struct xfs_trans *tp) { int i, j; struct xfs_dquot *dqp; struct xfs_dqtrx *qtrx, *qa; int64_t totalbdelta; int64_t totalrtbdelta; if (!tp->t_dqinfo) return; ASSERT(tp->t_dqinfo); for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { qa = tp->t_dqinfo->dqs[j]; if (qa[0].qt_dquot == NULL) continue; /* * Lock all of the dquots and join them to the transaction. */ xfs_trans_dqlockedjoin(tp, qa); for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { uint64_t blk_res_used; qtrx = &qa[i]; /* * The array of dquots is filled * sequentially, not sparsely. */ if ((dqp = qtrx->qt_dquot) == NULL) break; ASSERT(XFS_DQ_IS_LOCKED(dqp)); /* * adjust the actual number of blocks used */ /* * The issue here is - sometimes we don't make a blkquota * reservation intentionally to be fair to users * (when the amount is small). On the other hand, * delayed allocs do make reservations, but that's * outside of a transaction, so we have no * idea how much was really reserved. * So, here we've accumulated delayed allocation blks and * non-delay blks. The assumption is that the * delayed ones are always reserved (outside of a * transaction), and the others may or may not have * quota reservations. */ totalbdelta = qtrx->qt_bcount_delta + qtrx->qt_delbcnt_delta; totalrtbdelta = qtrx->qt_rtbcount_delta + qtrx->qt_delrtb_delta; if (totalbdelta != 0 || totalrtbdelta != 0 || qtrx->qt_icount_delta != 0) { trace_xfs_trans_apply_dquot_deltas_before(dqp); trace_xfs_trans_apply_dquot_deltas(qtrx); } #ifdef DEBUG if (totalbdelta < 0) ASSERT(dqp->q_blk.count >= -totalbdelta); if (totalrtbdelta < 0) ASSERT(dqp->q_rtb.count >= -totalrtbdelta); if (qtrx->qt_icount_delta < 0) ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta); #endif if (totalbdelta) dqp->q_blk.count += totalbdelta; if (qtrx->qt_icount_delta) dqp->q_ino.count += qtrx->qt_icount_delta; if (totalrtbdelta) dqp->q_rtb.count += totalrtbdelta; if (totalbdelta != 0 || totalrtbdelta != 0 || qtrx->qt_icount_delta != 0) trace_xfs_trans_apply_dquot_deltas_after(dqp); /* * Get any default limits in use. * Start/reset the timer(s) if needed. 
*/ if (dqp->q_id) { xfs_qm_adjust_dqlimits(dqp); xfs_qm_adjust_dqtimers(dqp); } dqp->q_flags |= XFS_DQFLAG_DIRTY; /* * add this to the list of items to get logged */ xfs_trans_log_dquot(tp, dqp); /* * Take off what's left of the original reservation. * In case of delayed allocations, there's no * reservation that a transaction structure knows of. */ blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta); xfs_apply_quota_reservation_deltas(&dqp->q_blk, qtrx->qt_blk_res, blk_res_used, qtrx->qt_bcount_delta); /* * Adjust the RT reservation. */ xfs_apply_quota_reservation_deltas(&dqp->q_rtb, qtrx->qt_rtblk_res, qtrx->qt_rtblk_res_used, qtrx->qt_rtbcount_delta); /* * Adjust the inode reservation. */ ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); xfs_apply_quota_reservation_deltas(&dqp->q_ino, qtrx->qt_ino_res, qtrx->qt_ino_res_used, qtrx->qt_icount_delta); ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count); ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count); ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count); } } } /* * Release the reservations, and adjust the dquots accordingly. * This is called only when the transaction is being aborted. If by * any chance we have done dquot modifications incore (ie. deltas) already, * we simply throw those away, since that's the expected behavior * when a transaction is curtailed without a commit. */ void xfs_trans_unreserve_and_mod_dquots( struct xfs_trans *tp) { int i, j; struct xfs_dquot *dqp; struct xfs_dqtrx *qtrx, *qa; bool locked; if (!tp->t_dqinfo) return; for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) { qa = tp->t_dqinfo->dqs[j]; for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { qtrx = &qa[i]; /* * We assume that the array of dquots is filled * sequentially, not sparsely. */ if ((dqp = qtrx->qt_dquot) == NULL) break; /* * Unreserve the original reservation. We don't care * about the number of blocks used field, or deltas. * Also we don't bother to zero the fields. */ locked = false; if (qtrx->qt_blk_res) { xfs_dqlock(dqp); locked = true; dqp->q_blk.reserved -= (xfs_qcnt_t)qtrx->qt_blk_res; } if (qtrx->qt_ino_res) { if (!locked) { xfs_dqlock(dqp); locked = true; } dqp->q_ino.reserved -= (xfs_qcnt_t)qtrx->qt_ino_res; } if (qtrx->qt_rtblk_res) { if (!locked) { xfs_dqlock(dqp); locked = true; } dqp->q_rtb.reserved -= (xfs_qcnt_t)qtrx->qt_rtblk_res; } if (locked) xfs_dqunlock(dqp); } } } STATIC void xfs_quota_warn( struct xfs_mount *mp, struct xfs_dquot *dqp, int type) { enum quota_type qtype; switch (xfs_dquot_type(dqp)) { case XFS_DQTYPE_PROJ: qtype = PRJQUOTA; break; case XFS_DQTYPE_USER: qtype = USRQUOTA; break; case XFS_DQTYPE_GROUP: qtype = GRPQUOTA; break; default: return; } quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id), mp->m_super->s_dev, type); } /* * Decide if we can make an additional reservation against a quota resource. * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal. * * Note that we assume that the numeric difference between the inode and block * warning codes will always be 3 since it's userspace ABI now, and will never * decrease the quota reservation, so the *BELOW messages are irrelevant. 
*/ static inline int xfs_dqresv_check( struct xfs_dquot_res *res, struct xfs_quota_limits *qlim, int64_t delta, bool *fatal) { xfs_qcnt_t hardlimit = res->hardlimit; xfs_qcnt_t softlimit = res->softlimit; xfs_qcnt_t total_count = res->reserved + delta; BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3); BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3); BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3); *fatal = false; if (delta <= 0) return QUOTA_NL_NOWARN; if (!hardlimit) hardlimit = qlim->hard; if (!softlimit) softlimit = qlim->soft; if (hardlimit && total_count > hardlimit) { *fatal = true; return QUOTA_NL_IHARDWARN; } if (softlimit && total_count > softlimit) { time64_t now = ktime_get_real_seconds(); if (res->timer != 0 && now > res->timer) { *fatal = true; return QUOTA_NL_ISOFTLONGWARN; } return QUOTA_NL_ISOFTWARN; } return QUOTA_NL_NOWARN; } /* * This reserves disk blocks and inodes against a dquot. * Flags indicate if the dquot is to be locked here and also * if the blk reservation is for RT or regular blocks. * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. */ STATIC int xfs_trans_dqresv( struct xfs_trans *tp, struct xfs_mount *mp, struct xfs_dquot *dqp, int64_t nblks, long ninos, uint flags) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_def_quota *defq; struct xfs_dquot_res *blkres; struct xfs_quota_limits *qlim; xfs_dqlock(dqp); defq = xfs_get_defquota(q, xfs_dquot_type(dqp)); if (flags & XFS_TRANS_DQ_RES_BLKS) { blkres = &dqp->q_blk; qlim = &defq->blk; } else { blkres = &dqp->q_rtb; qlim = &defq->rtb; } if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id && xfs_dquot_is_enforced(dqp)) { int quota_nl; bool fatal; /* * dquot is locked already. See if we'd go over the hardlimit * or exceed the timelimit if we'd reserve resources. */ quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal); if (quota_nl != QUOTA_NL_NOWARN) { /* * Quota block warning codes are 3 more than the inode * codes, which we check above. */ xfs_quota_warn(mp, dqp, quota_nl + 3); if (fatal) goto error_return; } quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos, &fatal); if (quota_nl != QUOTA_NL_NOWARN) { xfs_quota_warn(mp, dqp, quota_nl); if (fatal) goto error_return; } } /* * Change the reservation, but not the actual usage. * Note that q_blk.reserved = q_blk.count + resv */ blkres->reserved += (xfs_qcnt_t)nblks; dqp->q_ino.reserved += (xfs_qcnt_t)ninos; /* * note the reservation amt in the trans struct too, * so that the transaction knows how much was reserved by * it against this particular dquot. * We don't do this when we are reserving for a delayed allocation, * because we don't have the luxury of a transaction envelope then. */ if (tp) { ASSERT(flags & XFS_QMOPT_RESBLK_MASK); xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK, nblks); xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos); } if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) || XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) || XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count)) goto error_corrupt; xfs_dqunlock(dqp); return 0; error_return: xfs_dqunlock(dqp); if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ) return -ENOSPC; return -EDQUOT; error_corrupt: xfs_dqunlock(dqp); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return -EFSCORRUPTED; } /* * Given dquot(s), make disk block and/or inode reservations against them. 
* The fact that this does the reservation against user, group and * project quotas is important, because this follows a all-or-nothing * approach. * * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota. * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks * dquots are unlocked on return, if they were not locked by caller. */ int xfs_trans_reserve_quota_bydquots( struct xfs_trans *tp, struct xfs_mount *mp, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, int64_t nblks, long ninos, uint flags) { int error; if (!XFS_IS_QUOTA_ON(mp)) return 0; ASSERT(flags & XFS_QMOPT_RESBLK_MASK); if (udqp) { error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags); if (error) return error; } if (gdqp) { error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); if (error) goto unwind_usr; } if (pdqp) { error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags); if (error) goto unwind_grp; } /* * Didn't change anything critical, so, no need to log */ return 0; unwind_grp: flags |= XFS_QMOPT_FORCE_RES; if (gdqp) xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags); unwind_usr: flags |= XFS_QMOPT_FORCE_RES; if (udqp) xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags); return error; } /* * Lock the dquot and change the reservation if we can. * This doesn't change the actual usage, just the reservation. * The inode sent in is locked. */ int xfs_trans_reserve_quota_nblks( struct xfs_trans *tp, struct xfs_inode *ip, int64_t dblocks, int64_t rblocks, bool force) { struct xfs_mount *mp = ip->i_mount; unsigned int qflags = 0; int error; if (!XFS_IS_QUOTA_ON(mp)) return 0; ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (force) qflags |= XFS_QMOPT_FORCE_RES; /* Reserve data device quota against the inode's dquots. */ error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot, ip->i_gdquot, ip->i_pdquot, dblocks, 0, XFS_QMOPT_RES_REGBLKS | qflags); if (error) return error; /* Do the same but for realtime blocks. */ error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot, ip->i_gdquot, ip->i_pdquot, rblocks, 0, XFS_QMOPT_RES_RTBLKS | qflags); if (error) { xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot, ip->i_gdquot, ip->i_pdquot, -dblocks, 0, XFS_QMOPT_RES_REGBLKS); return error; } return 0; } /* Change the quota reservations for an inode creation activity. */ int xfs_trans_reserve_quota_icreate( struct xfs_trans *tp, struct xfs_dquot *udqp, struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, int64_t dblocks) { struct xfs_mount *mp = tp->t_mountp; if (!XFS_IS_QUOTA_ON(mp)) return 0; return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp, dblocks, 1, XFS_QMOPT_RES_REGBLKS); } STATIC void xfs_trans_alloc_dqinfo( xfs_trans_t *tp) { tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache, GFP_KERNEL | __GFP_NOFAIL); } void xfs_trans_free_dqinfo( xfs_trans_t *tp) { if (!tp->t_dqinfo) return; kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo); tp->t_dqinfo = NULL; } |
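/*
 * Illustrative sketch (not kernel code): the ordering of checks done by
 * xfs_dqresv_check() above, reduced to a standalone userspace program.
 * Every name below (demo_*) is hypothetical and exists only to show the
 * policy: releases are never rejected, a hard-limit breach is always
 * fatal, and a soft-limit breach only becomes fatal once the grace
 * timer has expired.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

enum demo_warn { DEMO_NOWARN, DEMO_SOFTWARN, DEMO_SOFTLONGWARN, DEMO_HARDWARN };

struct demo_res {
	uint64_t reserved;	/* usage + outstanding reservations */
	uint64_t softlimit;	/* 0 means "no limit configured" */
	uint64_t hardlimit;	/* 0 means "no limit configured" */
	time_t   timer;		/* end of the soft-limit grace period, 0 if unset */
};

static enum demo_warn demo_resv_check(const struct demo_res *res,
				      int64_t delta, bool *fatal)
{
	uint64_t total;

	*fatal = false;
	if (delta <= 0)
		return DEMO_NOWARN;		/* releasing space never fails */

	total = res->reserved + (uint64_t)delta;
	if (res->hardlimit && total > res->hardlimit) {
		*fatal = true;			/* over the hard limit: refuse */
		return DEMO_HARDWARN;
	}
	if (res->softlimit && total > res->softlimit) {
		if (res->timer && time(NULL) > res->timer) {
			*fatal = true;		/* grace period has run out */
			return DEMO_SOFTLONGWARN;
		}
		return DEMO_SOFTWARN;		/* allow it, but warn */
	}
	return DEMO_NOWARN;
}

int main(void)
{
	struct demo_res res = { .reserved = 90, .softlimit = 80, .hardlimit = 100 };
	bool fatal;
	enum demo_warn warn;

	/* 90 + 20 exceeds the hard limit of 100, so this must be fatal. */
	warn = demo_resv_check(&res, 20, &fatal);
	printf("warn=%d fatal=%d\n", warn, fatal);
	return 0;
}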
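/*
 * Illustrative sketch (not kernel code): the all-or-nothing unwind used by
 * xfs_trans_reserve_quota_bydquots() above, reduced to a standalone
 * userspace example. The demo_* names are hypothetical; the point is the
 * control flow: reserve against each quota in turn, and on any failure
 * release whatever was already taken, in reverse order, so that either
 * all three reservations stick or none do.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_quota {
	int64_t reserved;
	int64_t limit;
};

/* Take nblks from one quota; fail if the limit would be exceeded. */
static int demo_reserve(struct demo_quota *q, int64_t nblks, bool force)
{
	if (!force && q->reserved + nblks > q->limit)
		return -1;			/* stands in for -EDQUOT */
	q->reserved += nblks;
	return 0;
}

/* Reserve against user, group and project quotas, or against none at all. */
static int demo_reserve_bydquots(struct demo_quota *u, struct demo_quota *g,
				 struct demo_quota *p, int64_t nblks)
{
	int error;

	error = demo_reserve(u, nblks, false);
	if (error)
		return error;
	error = demo_reserve(g, nblks, false);
	if (error)
		goto unwind_usr;
	error = demo_reserve(p, nblks, false);
	if (error)
		goto unwind_grp;
	return 0;

unwind_grp:
	/* "force" mirrors XFS_QMOPT_FORCE_RES: the unwind itself must not fail. */
	demo_reserve(g, -nblks, true);
unwind_usr:
	demo_reserve(u, -nblks, true);
	return error;
}

int main(void)
{
	struct demo_quota u = { .reserved = 0, .limit = 100 };
	struct demo_quota g = { .reserved = 0, .limit = 100 };
	struct demo_quota p = { .reserved = 0, .limit = 10 };

	/* The project quota is too small, so nothing stays reserved. */
	if (demo_reserve_bydquots(&u, &g, &p, 50))
		printf("failed; user=%lld group=%lld rolled back\n",
		       (long long)u.reserved, (long long)g.reserved);
	return 0;
}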
2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/blkdev.h> #include <linux/uuid.h> #include <linux/timekeeping.h> #include "misc.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "locking.h" #include "tree-log.h" #include "volumes.h" #include "dev-replace.h" #include "qgroup.h" #include "block-group.h" #include "space-info.h" #include "zoned.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "root-tree.h" #include "defrag.h" #include "dir-item.h" #include "uuid-tree.h" #include "ioctl.h" #include "relocation.h" #include "scrub.h" static struct kmem_cache *btrfs_trans_handle_cachep; /* * Transaction states and transitions * * No running transaction (fs tree blocks are not modified) * | * | To next stage: * | Call start_transaction() variants. Except btrfs_join_transaction_nostart(). * V * Transaction N [[TRANS_STATE_RUNNING]] * | * | New trans handles can be attached to transaction N by calling all * | start_transaction() variants. 
* | * | To next stage: * | Call btrfs_commit_transaction() on any trans handle attached to * | transaction N * V * Transaction N [[TRANS_STATE_COMMIT_PREP]] * | * | If there are simultaneous calls to btrfs_commit_transaction() one will win * | the race and the rest will wait for the winner to commit the transaction. * | * | The winner will wait for previous running transaction to completely finish * | if there is one. * | * Transaction N [[TRANS_STATE_COMMIT_START]] * | * | Then one of the following happens: * | - Wait for all other trans handle holders to release. * | The btrfs_commit_transaction() caller will do the commit work. * | - Wait for current transaction to be committed by others. * | Other btrfs_commit_transaction() caller will do the commit work. * | * | At this stage, only btrfs_join_transaction*() variants can attach * | to this running transaction. * | All other variants will wait for current one to finish and attach to * | transaction N+1. * | * | To next stage: * | Caller is chosen to commit transaction N, and all other trans handle * | haven been released. * V * Transaction N [[TRANS_STATE_COMMIT_DOING]] * | * | The heavy lifting transaction work is started. * | From running delayed refs (modifying extent tree) to creating pending * | snapshots, running qgroups. * | In short, modify supporting trees to reflect modifications of subvolume * | trees. * | * | At this stage, all start_transaction() calls will wait for this * | transaction to finish and attach to transaction N+1. * | * | To next stage: * | Until all supporting trees are updated. * V * Transaction N [[TRANS_STATE_UNBLOCKED]] * | Transaction N+1 * | All needed trees are modified, thus we only [[TRANS_STATE_RUNNING]] * | need to write them back to disk and update | * | super blocks. | * | | * | At this stage, new transaction is allowed to | * | start. | * | All new start_transaction() calls will be | * | attached to transid N+1. | * | | * | To next stage: | * | Until all tree blocks are super blocks are | * | written to block devices | * V | * Transaction N [[TRANS_STATE_COMPLETED]] V * All tree blocks and super blocks are written. Transaction N+1 * This transaction is finished and all its [[TRANS_STATE_COMMIT_START]] * data structures will be cleaned up. 
| Life goes on */ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = { [TRANS_STATE_RUNNING] = 0U, [TRANS_STATE_COMMIT_PREP] = 0U, [TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH), [TRANS_STATE_COMMIT_DOING] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | __TRANS_JOIN_NOSTART), [TRANS_STATE_UNBLOCKED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | __TRANS_JOIN_NOLOCK | __TRANS_JOIN_NOSTART), [TRANS_STATE_SUPER_COMMITTED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | __TRANS_JOIN_NOLOCK | __TRANS_JOIN_NOSTART), [TRANS_STATE_COMPLETED] = (__TRANS_START | __TRANS_ATTACH | __TRANS_JOIN | __TRANS_JOIN_NOLOCK | __TRANS_JOIN_NOSTART), }; void btrfs_put_transaction(struct btrfs_transaction *transaction) { WARN_ON(refcount_read(&transaction->use_count) == 0); if (refcount_dec_and_test(&transaction->use_count)) { BUG_ON(!list_empty(&transaction->list)); WARN_ON(!RB_EMPTY_ROOT( &transaction->delayed_refs.href_root.rb_root)); WARN_ON(!RB_EMPTY_ROOT( &transaction->delayed_refs.dirty_extent_root)); if (transaction->delayed_refs.pending_csums) btrfs_err(transaction->fs_info, "pending csums is %llu", transaction->delayed_refs.pending_csums); /* * If any block groups are found in ->deleted_bgs then it's * because the transaction was aborted and a commit did not * happen (things failed before writing the new superblock * and calling btrfs_finish_extent_commit()), so we can not * discard the physical locations of the block groups. */ while (!list_empty(&transaction->deleted_bgs)) { struct btrfs_block_group *cache; cache = list_first_entry(&transaction->deleted_bgs, struct btrfs_block_group, bg_list); list_del_init(&cache->bg_list); btrfs_unfreeze_block_group(cache); btrfs_put_block_group(cache); } WARN_ON(!list_empty(&transaction->dev_update_list)); kfree(transaction); } } static noinline void switch_commit_roots(struct btrfs_trans_handle *trans) { struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *root, *tmp; /* * At this point no one can be using this transaction to modify any tree * and no one can start another transaction to modify any tree either. */ ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING); down_write(&fs_info->commit_root_sem); if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) fs_info->last_reloc_trans = trans->transid; list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits, dirty_list) { list_del_init(&root->dirty_list); free_extent_buffer(root->commit_root); root->commit_root = btrfs_root_node(root); extent_io_tree_release(&root->dirty_log_pages); btrfs_qgroup_clean_swapped_blocks(root); } /* We can free old roots now. 
*/ spin_lock(&cur_trans->dropped_roots_lock); while (!list_empty(&cur_trans->dropped_roots)) { root = list_first_entry(&cur_trans->dropped_roots, struct btrfs_root, root_list); list_del_init(&root->root_list); spin_unlock(&cur_trans->dropped_roots_lock); btrfs_free_log(trans, root); btrfs_drop_and_free_fs_root(fs_info, root); spin_lock(&cur_trans->dropped_roots_lock); } spin_unlock(&cur_trans->dropped_roots_lock); up_write(&fs_info->commit_root_sem); } static inline void extwriter_counter_inc(struct btrfs_transaction *trans, unsigned int type) { if (type & TRANS_EXTWRITERS) atomic_inc(&trans->num_extwriters); } static inline void extwriter_counter_dec(struct btrfs_transaction *trans, unsigned int type) { if (type & TRANS_EXTWRITERS) atomic_dec(&trans->num_extwriters); } static inline void extwriter_counter_init(struct btrfs_transaction *trans, unsigned int type) { atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0)); } static inline int extwriter_counter_read(struct btrfs_transaction *trans) { return atomic_read(&trans->num_extwriters); } /* * To be called after doing the chunk btree updates right after allocating a new * chunk (after btrfs_chunk_alloc_add_chunk_item() is called), when removing a * chunk after all chunk btree updates and after finishing the second phase of * chunk allocation (btrfs_create_pending_block_groups()) in case some block * group had its chunk item insertion delayed to the second phase. */ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; if (!trans->chunk_bytes_reserved) return; btrfs_block_rsv_release(fs_info, &fs_info->chunk_block_rsv, trans->chunk_bytes_reserved, NULL); trans->chunk_bytes_reserved = 0; } /* * either allocate a new transaction or hop into the existing one */ static noinline int join_transaction(struct btrfs_fs_info *fs_info, unsigned int type) { struct btrfs_transaction *cur_trans; spin_lock(&fs_info->trans_lock); loop: /* The file system has been taken offline. No new transactions. */ if (BTRFS_FS_ERROR(fs_info)) { spin_unlock(&fs_info->trans_lock); return -EROFS; } cur_trans = fs_info->running_transaction; if (cur_trans) { if (TRANS_ABORTED(cur_trans)) { spin_unlock(&fs_info->trans_lock); return cur_trans->aborted; } if (btrfs_blocked_trans_types[cur_trans->state] & type) { spin_unlock(&fs_info->trans_lock); return -EBUSY; } refcount_inc(&cur_trans->use_count); atomic_inc(&cur_trans->num_writers); extwriter_counter_inc(cur_trans, type); spin_unlock(&fs_info->trans_lock); btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); return 0; } spin_unlock(&fs_info->trans_lock); /* * If we are ATTACH or TRANS_JOIN_NOSTART, we just want to catch the * current transaction, and commit it. If there is no transaction, just * return ENOENT. */ if (type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART) return -ENOENT; /* * JOIN_NOLOCK only happens during the transaction commit, so * it is impossible that ->running_transaction is NULL */ BUG_ON(type == TRANS_JOIN_NOLOCK); cur_trans = kmalloc(sizeof(*cur_trans), GFP_NOFS); if (!cur_trans) return -ENOMEM; btrfs_lockdep_acquire(fs_info, btrfs_trans_num_writers); btrfs_lockdep_acquire(fs_info, btrfs_trans_num_extwriters); spin_lock(&fs_info->trans_lock); if (fs_info->running_transaction) { /* * someone started a transaction after we unlocked. 
Make sure * to redo the checks above */ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); kfree(cur_trans); goto loop; } else if (BTRFS_FS_ERROR(fs_info)) { spin_unlock(&fs_info->trans_lock); btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); kfree(cur_trans); return -EROFS; } cur_trans->fs_info = fs_info; atomic_set(&cur_trans->pending_ordered, 0); init_waitqueue_head(&cur_trans->pending_wait); atomic_set(&cur_trans->num_writers, 1); extwriter_counter_init(cur_trans, type); init_waitqueue_head(&cur_trans->writer_wait); init_waitqueue_head(&cur_trans->commit_wait); cur_trans->state = TRANS_STATE_RUNNING; /* * One for this trans handle, one so it will live on until we * commit the transaction. */ refcount_set(&cur_trans->use_count, 2); cur_trans->flags = 0; cur_trans->start_time = ktime_get_seconds(); memset(&cur_trans->delayed_refs, 0, sizeof(cur_trans->delayed_refs)); cur_trans->delayed_refs.href_root = RB_ROOT_CACHED; cur_trans->delayed_refs.dirty_extent_root = RB_ROOT; atomic_set(&cur_trans->delayed_refs.num_entries, 0); /* * although the tree mod log is per file system and not per transaction, * the log must never go across transaction boundaries. */ smp_mb(); if (!list_empty(&fs_info->tree_mod_seq_list)) WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when creating a fresh transaction\n"); if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when creating a fresh transaction\n"); atomic64_set(&fs_info->tree_mod_seq, 0); spin_lock_init(&cur_trans->delayed_refs.lock); INIT_LIST_HEAD(&cur_trans->pending_snapshots); INIT_LIST_HEAD(&cur_trans->dev_update_list); INIT_LIST_HEAD(&cur_trans->switch_commits); INIT_LIST_HEAD(&cur_trans->dirty_bgs); INIT_LIST_HEAD(&cur_trans->io_bgs); INIT_LIST_HEAD(&cur_trans->dropped_roots); mutex_init(&cur_trans->cache_write_mutex); spin_lock_init(&cur_trans->dirty_bgs_lock); INIT_LIST_HEAD(&cur_trans->deleted_bgs); spin_lock_init(&cur_trans->dropped_roots_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(fs_info, &cur_trans->dirty_pages, IO_TREE_TRANS_DIRTY_PAGES); extent_io_tree_init(fs_info, &cur_trans->pinned_extents, IO_TREE_FS_PINNED_EXTENTS); btrfs_set_fs_generation(fs_info, fs_info->generation + 1); cur_trans->transid = fs_info->generation; fs_info->running_transaction = cur_trans; cur_trans->aborted = 0; spin_unlock(&fs_info->trans_lock); return 0; } /* * This does all the record keeping required to make sure that a shareable root * is properly recorded in a given transaction. This is required to make sure * the old root from before we joined the transaction is deleted when the * transaction commits. 
*/ static int record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root, int force) { struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && root->last_trans < trans->transid) || force) { WARN_ON(!force && root->commit_root != root->node); /* * see below for IN_TRANS_SETUP usage rules * we have the reloc mutex held now, so there * is only one writer in this function */ set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); /* make sure readers find IN_TRANS_SETUP before * they find our root->last_trans update */ smp_wmb(); spin_lock(&fs_info->fs_roots_radix_lock); if (root->last_trans == trans->transid && !force) { spin_unlock(&fs_info->fs_roots_radix_lock); return 0; } radix_tree_tag_set(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&fs_info->fs_roots_radix_lock); root->last_trans = trans->transid; /* this is pretty tricky. We don't want to * take the relocation lock in btrfs_record_root_in_trans * unless we're really doing the first setup for this root in * this transaction. * * Normally we'd use root->last_trans as a flag to decide * if we want to take the expensive mutex. * * But, we have to set root->last_trans before we * init the relocation root, otherwise, we trip over warnings * in ctree.c. The solution used here is to flag ourselves * with root IN_TRANS_SETUP. When this is 1, we're still * fixing up the reloc trees and everyone must wait. * * When this is zero, they can trust root->last_trans and fly * through btrfs_record_root_in_trans without having to take the * lock. smp_wmb() makes sure that all the writes above are * done before we pop in the zero below */ ret = btrfs_init_reloc_root(trans, root); smp_mb__before_atomic(); clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state); } return ret; } void btrfs_add_dropped_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; /* Add ourselves to the transaction dropped list */ spin_lock(&cur_trans->dropped_roots_lock); list_add_tail(&root->root_list, &cur_trans->dropped_roots); spin_unlock(&cur_trans->dropped_roots_lock); /* Make sure we don't try to update the root at commit time */ spin_lock(&fs_info->fs_roots_radix_lock); radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&fs_info->fs_roots_radix_lock); } int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; int ret; if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return 0; /* * see record_root_in_trans for comments about IN_TRANS_SETUP usage * and barriers */ smp_rmb(); if (root->last_trans == trans->transid && !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state)) return 0; mutex_lock(&fs_info->reloc_mutex); ret = record_root_in_trans(trans, root, 0); mutex_unlock(&fs_info->reloc_mutex); return ret; } static inline int is_transaction_blocked(struct btrfs_transaction *trans) { return (trans->state >= TRANS_STATE_COMMIT_START && trans->state < TRANS_STATE_UNBLOCKED && !TRANS_ABORTED(trans)); } /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
*/ static void wait_current_trans(struct btrfs_fs_info *fs_info) { struct btrfs_transaction *cur_trans; spin_lock(&fs_info->trans_lock); cur_trans = fs_info->running_transaction; if (cur_trans && is_transaction_blocked(cur_trans)) { refcount_inc(&cur_trans->use_count); spin_unlock(&fs_info->trans_lock); btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); wait_event(fs_info->transaction_wait, cur_trans->state >= TRANS_STATE_UNBLOCKED || TRANS_ABORTED(cur_trans)); btrfs_put_transaction(cur_trans); } else { spin_unlock(&fs_info->trans_lock); } } static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type) { if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) return 0; if (type == TRANS_START) return 1; return 0; } static inline bool need_reserve_reloc_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; if (!fs_info->reloc_ctl || !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) || root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID || root->reloc_root) return false; return true; } static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info, enum btrfs_reserve_flush_enum flush, u64 num_bytes, u64 *delayed_refs_bytes) { struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info; u64 bytes = num_bytes + *delayed_refs_bytes; int ret; /* * We want to reserve all the bytes we may need all at once, so we only * do 1 enospc flushing cycle per transaction start. */ ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); /* * If we are an emergency flush, which can steal from the global block * reserve, then attempt to not reserve space for the delayed refs, as * we will consume space for them from the global block reserve. */ if (ret && flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) { bytes -= *delayed_refs_bytes; *delayed_refs_bytes = 0; ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush); } return ret; } static struct btrfs_trans_handle * start_transaction(struct btrfs_root *root, unsigned int num_items, unsigned int type, enum btrfs_reserve_flush_enum flush, bool enforce_qgroups) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv; struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv; struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; u64 num_bytes = 0; u64 qgroup_reserved = 0; u64 delayed_refs_bytes = 0; bool reloc_reserved = false; bool do_chunk_alloc = false; int ret; if (BTRFS_FS_ERROR(fs_info)) return ERR_PTR(-EROFS); if (current->journal_info) { WARN_ON(type & TRANS_EXTWRITERS); h = current->journal_info; refcount_inc(&h->use_count); WARN_ON(refcount_read(&h->use_count) > 2); h->orig_rsv = h->block_rsv; h->block_rsv = NULL; goto got_it; } /* * Do the reservation before we join the transaction so we can do all * the appropriate flushing if need be. */ if (num_items && root != fs_info->chunk_root) { qgroup_reserved = num_items * fs_info->nodesize; /* * Use prealloc for now, as there might be a currently running * transaction that could free this reserved space prematurely * by committing. */ ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserved, enforce_qgroups, false); if (ret) return ERR_PTR(ret); num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items); /* * If we plan to insert/update/delete "num_items" from a btree, * we will also generate delayed refs for extent buffers in the * respective btree paths, so reserve space for the delayed refs * that will be generated by the caller as it modifies btrees. 
* Try to reserve them to avoid excessive use of the global * block reserve. */ delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items); /* * Do the reservation for the relocation root creation */ if (need_reserve_reloc_root(root)) { num_bytes += fs_info->nodesize; reloc_reserved = true; } ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes, &delayed_refs_bytes); if (ret) goto reserve_fail; btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true); if (trans_rsv->space_info->force_alloc) do_chunk_alloc = true; } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL && !btrfs_block_rsv_full(delayed_refs_rsv)) { /* * Some people call with btrfs_start_transaction(root, 0) * because they can be throttled, but have some other mechanism * for reserving space. We still want these guys to refill the * delayed block_rsv so just add 1 items worth of reservation * here. */ ret = btrfs_delayed_refs_rsv_refill(fs_info, flush); if (ret) goto reserve_fail; } again: h = kmem_cache_zalloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) { ret = -ENOMEM; goto alloc_fail; } /* * If we are JOIN_NOLOCK we're already committing a transaction and * waiting on this guy, so we don't need to do the sb_start_intwrite * because we're already holding a ref. We need this because we could * have raced in and did an fsync() on a file which can kick a commit * and then we deadlock with somebody doing a freeze. * * If we are ATTACH, it means we just want to catch the current * transaction and commit it, so we needn't do sb_start_intwrite(). */ if (type & __TRANS_FREEZABLE) sb_start_intwrite(fs_info->sb); if (may_wait_transaction(fs_info, type)) wait_current_trans(fs_info); do { ret = join_transaction(fs_info, type); if (ret == -EBUSY) { wait_current_trans(fs_info); if (unlikely(type == TRANS_ATTACH || type == TRANS_JOIN_NOSTART)) ret = -ENOENT; } } while (ret == -EBUSY); if (ret < 0) goto join_fail; cur_trans = fs_info->running_transaction; h->transid = cur_trans->transid; h->transaction = cur_trans; refcount_set(&h->use_count, 1); h->fs_info = root->fs_info; h->type = type; INIT_LIST_HEAD(&h->new_bgs); btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS); smp_mb(); if (cur_trans->state >= TRANS_STATE_COMMIT_START && may_wait_transaction(fs_info, type)) { current->journal_info = h; btrfs_commit_transaction(h); goto again; } if (num_bytes) { trace_btrfs_space_reservation(fs_info, "transaction", h->transid, num_bytes, 1); h->block_rsv = trans_rsv; h->bytes_reserved = num_bytes; if (delayed_refs_bytes > 0) { trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv", h->transid, delayed_refs_bytes, 1); h->delayed_refs_bytes_reserved = delayed_refs_bytes; btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true); delayed_refs_bytes = 0; } h->reloc_reserved = reloc_reserved; } /* * Now that we have found a transaction to be a part of, convert the * qgroup reservation from prealloc to pertrans. A different transaction * can't race in and free our pertrans out from under us. */ if (qgroup_reserved) btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); got_it: if (!current->journal_info) current->journal_info = h; /* * If the space_info is marked ALLOC_FORCE then we'll get upgraded to * ALLOC_FORCE the first run through, and then we won't allocate for * anybody else who races in later. We don't care about the return * value here. 
*/ if (do_chunk_alloc && num_bytes) { u64 flags = h->block_rsv->space_info->flags; btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags), CHUNK_ALLOC_NO_FORCE); } /* * btrfs_record_root_in_trans() needs to alloc new extents, and may * call btrfs_join_transaction() while we're also starting a * transaction. * * Thus it need to be called after current->journal_info initialized, * or we can deadlock. */ ret = btrfs_record_root_in_trans(h, root); if (ret) { /* * The transaction handle is fully initialized and linked with * other structures so it needs to be ended in case of errors, * not just freed. */ btrfs_end_transaction(h); return ERR_PTR(ret); } return h; join_fail: if (type & __TRANS_FREEZABLE) sb_end_intwrite(fs_info->sb); kmem_cache_free(btrfs_trans_handle_cachep, h); alloc_fail: if (num_bytes) btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL); if (delayed_refs_bytes) btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info, delayed_refs_bytes); reserve_fail: btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); return ERR_PTR(ret); } struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, unsigned int num_items) { return start_transaction(root, num_items, TRANS_START, BTRFS_RESERVE_FLUSH_ALL, true); } struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv( struct btrfs_root *root, unsigned int num_items) { return start_transaction(root, num_items, TRANS_START, BTRFS_RESERVE_FLUSH_ALL_STEAL, false); } struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN, BTRFS_RESERVE_NO_FLUSH, true); } struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK, BTRFS_RESERVE_NO_FLUSH, true); } /* * Similar to regular join but it never starts a transaction when none is * running or when there's a running one at a state >= TRANS_STATE_UNBLOCKED. * This is similar to btrfs_attach_transaction() but it allows the join to * happen if the transaction commit already started but it's not yet in the * "doing" phase (the state is < TRANS_STATE_COMMIT_DOING). */ struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOSTART, BTRFS_RESERVE_NO_FLUSH, true); } /* * Catch the running transaction. * * It is used when we want to commit the current the transaction, but * don't want to start a new one. * * Note: If this function return -ENOENT, it just means there is no * running transaction. But it is possible that the inactive transaction * is still in the memory, not fully on disk. If you hope there is no * inactive transaction in the fs when -ENOENT is returned, you should * invoke * btrfs_attach_transaction_barrier() */ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_ATTACH, BTRFS_RESERVE_NO_FLUSH, true); } /* * Catch the running transaction. * * It is similar to the above function, the difference is this one * will wait for all the inactive transactions until they fully * complete. 
*/ struct btrfs_trans_handle * btrfs_attach_transaction_barrier(struct btrfs_root *root) { struct btrfs_trans_handle *trans; trans = start_transaction(root, 0, TRANS_ATTACH, BTRFS_RESERVE_NO_FLUSH, true); if (trans == ERR_PTR(-ENOENT)) { int ret; ret = btrfs_wait_for_commit(root->fs_info, 0); if (ret) return ERR_PTR(ret); } return trans; } /* Wait for a transaction commit to reach at least the given state. */ static noinline void wait_for_commit(struct btrfs_transaction *commit, const enum btrfs_trans_state min_state) { struct btrfs_fs_info *fs_info = commit->fs_info; u64 transid = commit->transid; bool put = false; /* * At the moment this function is called with min_state either being * TRANS_STATE_COMPLETED or TRANS_STATE_SUPER_COMMITTED. */ if (min_state == TRANS_STATE_COMPLETED) btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); else btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); while (1) { wait_event(commit->commit_wait, commit->state >= min_state); if (put) btrfs_put_transaction(commit); if (min_state < TRANS_STATE_COMPLETED) break; /* * A transaction isn't really completed until all of the * previous transactions are completed, but with fsync we can * end up with SUPER_COMMITTED transactions before a COMPLETED * transaction. Wait for those. */ spin_lock(&fs_info->trans_lock); commit = list_first_entry_or_null(&fs_info->trans_list, struct btrfs_transaction, list); if (!commit || commit->transid > transid) { spin_unlock(&fs_info->trans_lock); break; } refcount_inc(&commit->use_count); put = true; spin_unlock(&fs_info->trans_lock); } } int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid) { struct btrfs_transaction *cur_trans = NULL, *t; int ret = 0; if (transid) { if (transid <= btrfs_get_last_trans_committed(fs_info)) goto out; /* find specified transaction */ spin_lock(&fs_info->trans_lock); list_for_each_entry(t, &fs_info->trans_list, list) { if (t->transid == transid) { cur_trans = t; refcount_inc(&cur_trans->use_count); ret = 0; break; } if (t->transid > transid) { ret = 0; break; } } spin_unlock(&fs_info->trans_lock); /* * The specified transaction doesn't exist, or we * raced with btrfs_commit_transaction */ if (!cur_trans) { if (transid > btrfs_get_last_trans_committed(fs_info)) ret = -EINVAL; goto out; } } else { /* find newest transaction that is committing | committed */ spin_lock(&fs_info->trans_lock); list_for_each_entry_reverse(t, &fs_info->trans_list, list) { if (t->state >= TRANS_STATE_COMMIT_START) { if (t->state == TRANS_STATE_COMPLETED) break; cur_trans = t; refcount_inc(&cur_trans->use_count); break; } } spin_unlock(&fs_info->trans_lock); if (!cur_trans) goto out; /* nothing committing|committed */ } wait_for_commit(cur_trans, TRANS_STATE_COMPLETED); ret = cur_trans->aborted; btrfs_put_transaction(cur_trans); out: return ret; } void btrfs_throttle(struct btrfs_fs_info *fs_info) { wait_current_trans(fs_info); } bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans) { struct btrfs_transaction *cur_trans = trans->transaction; if (cur_trans->state >= TRANS_STATE_COMMIT_START || test_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) return true; if (btrfs_check_space_for_delayed_refs(trans->fs_info)) return true; return !!btrfs_block_rsv_check(&trans->fs_info->global_block_rsv, 50); } static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; if (!trans->block_rsv) { ASSERT(!trans->bytes_reserved); 
ASSERT(!trans->delayed_refs_bytes_reserved); return; } if (!trans->bytes_reserved) { ASSERT(!trans->delayed_refs_bytes_reserved); return; } ASSERT(trans->block_rsv == &fs_info->trans_block_rsv); trace_btrfs_space_reservation(fs_info, "transaction", trans->transid, trans->bytes_reserved, 0); btrfs_block_rsv_release(fs_info, trans->block_rsv, trans->bytes_reserved, NULL); trans->bytes_reserved = 0; if (!trans->delayed_refs_bytes_reserved) return; trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv", trans->transid, trans->delayed_refs_bytes_reserved, 0); btrfs_block_rsv_release(fs_info, &trans->delayed_rsv, trans->delayed_refs_bytes_reserved, NULL); trans->delayed_refs_bytes_reserved = 0; } static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, int throttle) { struct btrfs_fs_info *info = trans->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; int err = 0; if (refcount_read(&trans->use_count) > 1) { refcount_dec(&trans->use_count); trans->block_rsv = trans->orig_rsv; return 0; } btrfs_trans_release_metadata(trans); trans->block_rsv = NULL; btrfs_create_pending_block_groups(trans); btrfs_trans_release_chunk_metadata(trans); if (trans->type & __TRANS_FREEZABLE) sb_end_intwrite(info->sb); WARN_ON(cur_trans != info->running_transaction); WARN_ON(atomic_read(&cur_trans->num_writers) < 1); atomic_dec(&cur_trans->num_writers); extwriter_counter_dec(cur_trans, trans->type); cond_wake_up(&cur_trans->writer_wait); btrfs_lockdep_release(info, btrfs_trans_num_extwriters); btrfs_lockdep_release(info, btrfs_trans_num_writers); btrfs_put_transaction(cur_trans); if (current->journal_info == trans) current->journal_info = NULL; if (throttle) btrfs_run_delayed_iputs(info); if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) { wake_up_process(info->transaction_kthread); if (TRANS_ABORTED(trans)) err = trans->aborted; else err = -EROFS; } kmem_cache_free(btrfs_trans_handle_cachep, trans); return err; } int btrfs_end_transaction(struct btrfs_trans_handle *trans) { return __btrfs_end_transaction(trans, 0); } int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans) { return __btrfs_end_transaction(trans, 1); } /* * when btree blocks are allocated, they have some corresponding bits set for * them in one of two extent_io trees. This is used to make sure all of * those extents are sent to disk but does not wait on them */ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages, int mark) { int err = 0; int werr = 0; struct address_space *mapping = fs_info->btree_inode->i_mapping; struct extent_state *cached_state = NULL; u64 start = 0; u64 end; while (find_first_extent_bit(dirty_pages, start, &start, &end, mark, &cached_state)) { bool wait_writeback = false; err = convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark, &cached_state); /* * convert_extent_bit can return -ENOMEM, which is most of the * time a temporary error. So when it happens, ignore the error * and wait for writeback of this range to finish - because we * failed to set the bit EXTENT_NEED_WAIT for the range, a call * to __btrfs_wait_marked_extents() would not know that * writeback for this range started and therefore wouldn't * wait for it to finish - we don't want to commit a * superblock that points to btree nodes/leafs for which * writeback hasn't finished yet (and without errors). * We cleanup any entries left in the io tree when committing * the transaction (through extent_io_tree_release()). 
*/ if (err == -ENOMEM) { err = 0; wait_writeback = true; } if (!err) err = filemap_fdatawrite_range(mapping, start, end); if (err) werr = err; else if (wait_writeback) werr = filemap_fdatawait_range(mapping, start, end); free_extent_state(cached_state); cached_state = NULL; cond_resched(); start = end + 1; } return werr; } /* * when btree blocks are allocated, they have some corresponding bits set for * them in one of two extent_io trees. This is used to make sure all of * those extents are on disk for transaction or log commit. We wait * on all the pages and clear them from the dirty pages state tree */ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { int err = 0; int werr = 0; struct address_space *mapping = fs_info->btree_inode->i_mapping; struct extent_state *cached_state = NULL; u64 start = 0; u64 end; while (find_first_extent_bit(dirty_pages, start, &start, &end, EXTENT_NEED_WAIT, &cached_state)) { /* * Ignore -ENOMEM errors returned by clear_extent_bit(). * When committing the transaction, we'll remove any entries * left in the io tree. For a log commit, we don't remove them * after committing the log because the tree can be accessed * concurrently - we do it only at transaction commit time when * it's safe to do it (through extent_io_tree_release()). */ err = clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, &cached_state); if (err == -ENOMEM) err = 0; if (!err) err = filemap_fdatawait_range(mapping, start, end); if (err) werr = err; free_extent_state(cached_state); cached_state = NULL; cond_resched(); start = end + 1; } if (err) werr = err; return werr; } static int btrfs_wait_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages) { bool errors = false; int err; err = __btrfs_wait_marked_extents(fs_info, dirty_pages); if (test_and_clear_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags)) errors = true; if (errors && !err) err = -EIO; return err; } int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark) { struct btrfs_fs_info *fs_info = log_root->fs_info; struct extent_io_tree *dirty_pages = &log_root->dirty_log_pages; bool errors = false; int err; ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); err = __btrfs_wait_marked_extents(fs_info, dirty_pages); if ((mark & EXTENT_DIRTY) && test_and_clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags)) errors = true; if ((mark & EXTENT_NEW) && test_and_clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags)) errors = true; if (errors && !err) err = -EIO; return err; } /* * When btree blocks are allocated the corresponding extents are marked dirty. * This function ensures such extents are persisted on disk for transaction or * log commit. * * @trans: transaction whose dirty pages we'd like to write */ static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans) { int ret; int ret2; struct extent_io_tree *dirty_pages = &trans->transaction->dirty_pages; struct btrfs_fs_info *fs_info = trans->fs_info; struct blk_plug plug; blk_start_plug(&plug); ret = btrfs_write_marked_extents(fs_info, dirty_pages, EXTENT_DIRTY); blk_finish_plug(&plug); ret2 = btrfs_wait_extents(fs_info, dirty_pages); extent_io_tree_release(&trans->transaction->dirty_pages); if (ret) return ret; else if (ret2) return ret2; else return 0; } /* * this is used to update the root pointer in the tree of tree roots. * * But, in the case of the extent allocation tree, updating the root * pointer may allocate blocks which may change the root of the extent * allocation tree. 
* * So, this loops and repeats and makes sure the cowonly root didn't * change while the root pointer was being updated in the metadata. */ static int update_cowonly_root(struct btrfs_trans_handle *trans, struct btrfs_root *root) { int ret; u64 old_root_bytenr; u64 old_root_used; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *tree_root = fs_info->tree_root; old_root_used = btrfs_root_used(&root->root_item); while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start && old_root_used == btrfs_root_used(&root->root_item)) break; btrfs_set_root_node(&root->root_item, root->node); ret = btrfs_update_root(trans, tree_root, &root->root_key, &root->root_item); if (ret) return ret; old_root_used = btrfs_root_used(&root->root_item); } return 0; } /* * update all the cowonly tree roots on disk * * The error handling in this function may not be obvious. Any of the * failures will cause the file system to go offline. We still need * to clean up the delayed refs. */ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct list_head *dirty_bgs = &trans->transaction->dirty_bgs; struct list_head *io_bgs = &trans->transaction->io_bgs; struct list_head *next; struct extent_buffer *eb; int ret; /* * At this point no one can be using this transaction to modify any tree * and no one can start another transaction to modify any tree either. */ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); eb = btrfs_lock_root_node(fs_info->tree_root); ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, BTRFS_NESTING_COW); btrfs_tree_unlock(eb); free_extent_buffer(eb); if (ret) return ret; ret = btrfs_run_dev_stats(trans); if (ret) return ret; ret = btrfs_run_dev_replace(trans); if (ret) return ret; ret = btrfs_run_qgroups(trans); if (ret) return ret; ret = btrfs_setup_space_cache(trans); if (ret) return ret; again: while (!list_empty(&fs_info->dirty_cowonly_roots)) { struct btrfs_root *root; next = fs_info->dirty_cowonly_roots.next; list_del_init(next); root = list_entry(next, struct btrfs_root, dirty_list); clear_bit(BTRFS_ROOT_DIRTY, &root->state); list_add_tail(&root->dirty_list, &trans->transaction->switch_commits); ret = update_cowonly_root(trans, root); if (ret) return ret; } /* Now flush any delayed refs generated by updating all of the roots */ ret = btrfs_run_delayed_refs(trans, U64_MAX); if (ret) return ret; while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) { ret = btrfs_write_dirty_block_groups(trans); if (ret) return ret; /* * We're writing the dirty block groups, which could generate * delayed refs, which could generate more dirty block groups, * so we want to keep this flushing in this loop to make sure * everything gets run. */ ret = btrfs_run_delayed_refs(trans, U64_MAX); if (ret) return ret; } if (!list_empty(&fs_info->dirty_cowonly_roots)) goto again; /* Update dev-replace pointer once everything is committed */ fs_info->dev_replace.committed_cursor_left = fs_info->dev_replace.cursor_left_last_write_of_item; return 0; } /* * If we had a pending drop we need to see if there are any others left in our * dead roots list, and if not clear our bit and wake any waiters. */ void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info) { /* * We put the drop in progress roots at the front of the list, so if the * first entry doesn't have UNFINISHED_DROP set we can wake everybody * up. 
*/ spin_lock(&fs_info->trans_lock); if (!list_empty(&fs_info->dead_roots)) { struct btrfs_root *root = list_first_entry(&fs_info->dead_roots, struct btrfs_root, root_list); if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) { spin_unlock(&fs_info->trans_lock); return; } } spin_unlock(&fs_info->trans_lock); btrfs_wake_unfinished_drop(fs_info); } /* * dead roots are old snapshots that need to be deleted. This allocates * a dirty root struct and adds it into the list of dead roots that need to * be deleted */ void btrfs_add_dead_root(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; spin_lock(&fs_info->trans_lock); if (list_empty(&root->root_list)) { btrfs_grab_root(root); /* We want to process the partially complete drops first. */ if (test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state)) list_add(&root->root_list, &fs_info->dead_roots); else list_add_tail(&root->root_list, &fs_info->dead_roots); } spin_unlock(&fs_info->trans_lock); } /* * Update each subvolume root and its relocation root, if it exists, in the tree * of tree roots. Also free log roots if they exist. */ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *gang[8]; int i; int ret; /* * At this point no one can be using this transaction to modify any tree * and no one can start another transaction to modify any tree either. */ ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); spin_lock(&fs_info->fs_roots_radix_lock); while (1) { ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang), BTRFS_ROOT_TRANS_TAG); if (ret == 0) break; for (i = 0; i < ret; i++) { struct btrfs_root *root = gang[i]; int ret2; /* * At this point we can neither have tasks logging inodes * from a root nor trying to commit a log tree. */ ASSERT(atomic_read(&root->log_writers) == 0); ASSERT(atomic_read(&root->log_commit[0]) == 0); ASSERT(atomic_read(&root->log_commit[1]) == 0); radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log(trans, root); ret2 = btrfs_update_reloc_root(trans, root); if (ret2) return ret2; /* see comments in should_cow_block() */ clear_bit(BTRFS_ROOT_FORCE_COW, &root->state); smp_mb__after_atomic(); if (root->commit_root != root->node) { list_add_tail(&root->dirty_list, &trans->transaction->switch_commits); btrfs_set_root_node(&root->root_item, root->node); } ret2 = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); if (ret2) return ret2; spin_lock(&fs_info->fs_roots_radix_lock); btrfs_qgroup_free_meta_all_pertrans(root); } } spin_unlock(&fs_info->fs_roots_radix_lock); return 0; } /* * Do all special snapshot related qgroup dirty hack. * * Will do all needed qgroup inherit and dirty hack like switch commit * roots inside one transaction and write all btree into disk, to make * qgroup works. */ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root *src, struct btrfs_root *parent, struct btrfs_qgroup_inherit *inherit, u64 dst_objectid) { struct btrfs_fs_info *fs_info = src->fs_info; int ret; /* * Save some performance in the case that qgroups are not enabled. If * this check races with the ioctl, rescan will kick in anyway. */ if (!btrfs_qgroup_full_accounting(fs_info)) return 0; /* * Ensure dirty @src will be committed. 
Or, after coming * commit_fs_roots() and switch_commit_roots(), any dirty but not * recorded root will never be updated again, causing an outdated root * item. */ ret = record_root_in_trans(trans, src, 1); if (ret) return ret; /* * btrfs_qgroup_inherit relies on a consistent view of the usage for the * src root, so we must run the delayed refs here. * * However this isn't particularly fool proof, because there's no * synchronization keeping us from changing the tree after this point * before we do the qgroup_inherit, or even from making changes while * we're doing the qgroup_inherit. But that's a problem for the future, * for now flush the delayed refs to narrow the race window where the * qgroup counters could end up wrong. */ ret = btrfs_run_delayed_refs(trans, U64_MAX); if (ret) { btrfs_abort_transaction(trans, ret); return ret; } ret = commit_fs_roots(trans); if (ret) goto out; ret = btrfs_qgroup_account_extents(trans); if (ret < 0) goto out; /* Now qgroup are all updated, we can inherit it to new qgroups */ ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid, parent->root_key.objectid, inherit); if (ret < 0) goto out; /* * Now we do a simplified commit transaction, which will: * 1) commit all subvolume and extent tree * To ensure all subvolume and extent tree have a valid * commit_root to accounting later insert_dir_item() * 2) write all btree blocks onto disk * This is to make sure later btree modification will be cowed * Or commit_root can be populated and cause wrong qgroup numbers * In this simplified commit, we don't really care about other trees * like chunk and root tree, as they won't affect qgroup. * And we don't write super to avoid half committed status. */ ret = commit_cowonly_roots(trans); if (ret) goto out; switch_commit_roots(trans); ret = btrfs_write_and_wait_transaction(trans); if (ret) btrfs_handle_fs_error(fs_info, ret, "Error while writing out transaction for qgroup"); out: /* * Force parent root to be updated, as we recorded it before so its * last_trans == cur_transid. * Or it won't be committed again onto disk after later * insert_dir_item() */ if (!ret) ret = record_root_in_trans(trans, parent, 1); return ret; } /* * new snapshots need to be created at a very specific time in the * transaction commit. This does the actual creation. * * Note: * If the error which may affect the commitment of the current transaction * happens, we should return the error number. If the error which just affect * the creation of the pending snapshots, just return 0. */ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, struct btrfs_pending_snapshot *pending) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_key key; struct btrfs_root_item *new_root_item; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root = pending->root; struct btrfs_root *parent_root; struct btrfs_block_rsv *rsv; struct inode *parent_inode = pending->dir; struct btrfs_path *path; struct btrfs_dir_item *dir_item; struct extent_buffer *tmp; struct extent_buffer *old; struct timespec64 cur_time; int ret = 0; u64 to_reserve = 0; u64 index = 0; u64 objectid; u64 root_flags; unsigned int nofs_flags; struct fscrypt_name fname; ASSERT(pending->path); path = pending->path; ASSERT(pending->root_item); new_root_item = pending->root_item; /* * We're inside a transaction and must make sure that any potential * allocations with GFP_KERNEL in fscrypt won't recurse back to * filesystem. 
*/ nofs_flags = memalloc_nofs_save(); pending->error = fscrypt_setup_filename(parent_inode, &pending->dentry->d_name, 0, &fname); memalloc_nofs_restore(nofs_flags); if (pending->error) goto free_pending; pending->error = btrfs_get_free_objectid(tree_root, &objectid); if (pending->error) goto free_fname; /* * Make qgroup to skip current new snapshot's qgroupid, as it is * accounted by later btrfs_qgroup_inherit(). */ btrfs_set_skip_qgroup(trans, objectid); btrfs_reloc_pre_snapshot(pending, &to_reserve); if (to_reserve > 0) { pending->error = btrfs_block_rsv_add(fs_info, &pending->block_rsv, to_reserve, BTRFS_RESERVE_NO_FLUSH); if (pending->error) goto clear_skip_qgroup; } key.objectid = objectid; key.offset = (u64)-1; key.type = BTRFS_ROOT_ITEM_KEY; rsv = trans->block_rsv; trans->block_rsv = &pending->block_rsv; trans->bytes_reserved = trans->block_rsv->reserved; trace_btrfs_space_reservation(fs_info, "transaction", trans->transid, trans->bytes_reserved, 1); parent_root = BTRFS_I(parent_inode)->root; ret = record_root_in_trans(trans, parent_root, 0); if (ret) goto fail; cur_time = current_time(parent_inode); /* * insert the directory item */ ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } /* check if there is a file/dir which has the same name. */ dir_item = btrfs_lookup_dir_item(NULL, parent_root, path, btrfs_ino(BTRFS_I(parent_inode)), &fname.disk_name, 0); if (dir_item != NULL && !IS_ERR(dir_item)) { pending->error = -EEXIST; goto dir_item_existed; } else if (IS_ERR(dir_item)) { ret = PTR_ERR(dir_item); btrfs_abort_transaction(trans, ret); goto fail; } btrfs_release_path(path); ret = btrfs_create_qgroup(trans, objectid); if (ret && ret != -EEXIST) { btrfs_abort_transaction(trans, ret); goto fail; } /* * pull in the delayed directory update * and the delayed inode item * otherwise we corrupt the FS during * snapshot */ ret = btrfs_run_delayed_items(trans); if (ret) { /* Transaction aborted */ btrfs_abort_transaction(trans, ret); goto fail; } ret = record_root_in_trans(trans, root, 0); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); root_flags = btrfs_root_flags(new_root_item); if (pending->readonly) root_flags |= BTRFS_ROOT_SUBVOL_RDONLY; else root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY; btrfs_set_root_flags(new_root_item, root_flags); btrfs_set_root_generation_v2(new_root_item, trans->transid); generate_random_guid(new_root_item->uuid); memcpy(new_root_item->parent_uuid, root->root_item.uuid, BTRFS_UUID_SIZE); if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) { memset(new_root_item->received_uuid, 0, sizeof(new_root_item->received_uuid)); memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); btrfs_set_root_stransid(new_root_item, 0); btrfs_set_root_rtransid(new_root_item, 0); } btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec); btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec); btrfs_set_root_otransid(new_root_item, trans->transid); old = btrfs_lock_root_node(root); ret = btrfs_cow_block(trans, root, old, NULL, 0, &old, BTRFS_NESTING_COW); if (ret) { btrfs_tree_unlock(old); free_extent_buffer(old); btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_copy_root(trans, root, old, &tmp, objectid); /* clean up in any case 
*/ btrfs_tree_unlock(old); free_extent_buffer(old); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } /* see comments in should_cow_block() */ set_bit(BTRFS_ROOT_FORCE_COW, &root->state); smp_wmb(); btrfs_set_root_node(new_root_item, tmp); /* record when the snapshot was created in key.offset */ key.offset = trans->transid; ret = btrfs_insert_root(trans, tree_root, &key, new_root_item); btrfs_tree_unlock(tmp); free_extent_buffer(tmp); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } /* * insert root back/forward references */ ret = btrfs_add_root_ref(trans, objectid, parent_root->root_key.objectid, btrfs_ino(BTRFS_I(parent_inode)), index, &fname.disk_name); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } key.offset = (u64)-1; pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); if (IS_ERR(pending->snap)) { ret = PTR_ERR(pending->snap); pending->snap = NULL; btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_reloc_post_snapshot(trans, pending); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } /* * Do special qgroup accounting for snapshot, as we do some qgroup * snapshot hack to do fast snapshot. * To co-operate with that hack, we do hack again. * Or snapshot will be greatly slowed down by a subtree qgroup rescan */ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) ret = qgroup_account_snapshot(trans, root, parent_root, pending->inherit, objectid); else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid, parent_root->root_key.objectid, pending->inherit); if (ret < 0) goto fail; ret = btrfs_insert_dir_item(trans, &fname.disk_name, BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, index); /* We have check then name at the beginning, so it is impossible. 
*/ BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + fname.disk_name.len * 2); inode_set_mtime_to_ts(parent_inode, inode_set_ctime_current(parent_inode)); ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode)); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_uuid_tree_add(trans, new_root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) { ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, objectid); if (ret && ret != -EEXIST) { btrfs_abort_transaction(trans, ret); goto fail; } } fail: pending->error = ret; dir_item_existed: trans->block_rsv = rsv; trans->bytes_reserved = 0; clear_skip_qgroup: btrfs_clear_skip_qgroup(trans); free_fname: fscrypt_free_filename(&fname); free_pending: kfree(new_root_item); pending->root_item = NULL; btrfs_free_path(path); pending->path = NULL; return ret; } /* * create all the snapshots we've scheduled for creation */ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans) { struct btrfs_pending_snapshot *pending, *next; struct list_head *head = &trans->transaction->pending_snapshots; int ret = 0; list_for_each_entry_safe(pending, next, head, list) { list_del(&pending->list); ret = create_pending_snapshot(trans, pending); if (ret) break; } return ret; } static void update_super_roots(struct btrfs_fs_info *fs_info) { struct btrfs_root_item *root_item; struct btrfs_super_block *super; super = fs_info->super_copy; root_item = &fs_info->chunk_root->root_item; super->chunk_root = root_item->bytenr; super->chunk_root_generation = root_item->generation; super->chunk_root_level = root_item->level; root_item = &fs_info->tree_root->root_item; super->root = root_item->bytenr; super->generation = root_item->generation; super->root_level = root_item->level; if (btrfs_test_opt(fs_info, SPACE_CACHE)) super->cache_generation = root_item->generation; else if (test_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags)) super->cache_generation = 0; if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags)) super->uuid_tree_generation = root_item->generation; } int btrfs_transaction_in_commit(struct btrfs_fs_info *info) { struct btrfs_transaction *trans; int ret = 0; spin_lock(&info->trans_lock); trans = info->running_transaction; if (trans) ret = (trans->state >= TRANS_STATE_COMMIT_START); spin_unlock(&info->trans_lock); return ret; } int btrfs_transaction_blocked(struct btrfs_fs_info *info) { struct btrfs_transaction *trans; int ret = 0; spin_lock(&info->trans_lock); trans = info->running_transaction; if (trans) ret = is_transaction_blocked(trans); spin_unlock(&info->trans_lock); return ret; } void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_transaction *cur_trans; /* Kick the transaction kthread. 
*/ set_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); wake_up_process(fs_info->transaction_kthread); /* take transaction reference */ cur_trans = trans->transaction; refcount_inc(&cur_trans->use_count); btrfs_end_transaction(trans); /* * Wait for the current transaction commit to start and block * subsequent transaction joins */ btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); wait_event(fs_info->transaction_blocked_wait, cur_trans->state >= TRANS_STATE_COMMIT_START || TRANS_ABORTED(cur_trans)); btrfs_put_transaction(cur_trans); } static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; WARN_ON(refcount_read(&trans->use_count) > 1); btrfs_abort_transaction(trans, err); spin_lock(&fs_info->trans_lock); /* * If the transaction is removed from the list, it means this * transaction has been committed successfully, so it is impossible * to call the cleanup function. */ BUG_ON(list_empty(&cur_trans->list)); if (cur_trans == fs_info->running_transaction) { cur_trans->state = TRANS_STATE_COMMIT_DOING; spin_unlock(&fs_info->trans_lock); /* * The thread has already released the lockdep map as reader * already in btrfs_commit_transaction(). */ btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); spin_lock(&fs_info->trans_lock); } /* * Now that we know no one else is still using the transaction we can * remove the transaction from the list of transactions. This avoids * the transaction kthread from cleaning up the transaction while some * other task is still using it, which could result in a use-after-free * on things like log trees, as it forces the transaction kthread to * wait for this transaction to be cleaned up by us. */ list_del_init(&cur_trans->list); spin_unlock(&fs_info->trans_lock); btrfs_cleanup_one_transaction(trans->transaction, fs_info); spin_lock(&fs_info->trans_lock); if (cur_trans == fs_info->running_transaction) fs_info->running_transaction = NULL; spin_unlock(&fs_info->trans_lock); if (trans->type & __TRANS_FREEZABLE) sb_end_intwrite(fs_info->sb); btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans); trace_btrfs_transaction_commit(fs_info); if (current->journal_info == trans) current->journal_info = NULL; /* * If relocation is running, we can't cancel scrub because that will * result in a deadlock. Before relocating a block group, relocation * pauses scrub, then starts and commits a transaction before unpausing * scrub. If the transaction commit is being done by the relocation * task or triggered by another task and the relocation task is waiting * for the commit, and we end up here due to an error in the commit * path, then calling btrfs_scrub_cancel() will deadlock, as we are * asking for scrub to stop while having it asked to be paused higher * above in relocation code. 
*/ if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) btrfs_scrub_cancel(fs_info); kmem_cache_free(btrfs_trans_handle_cachep, trans); } /* * Release reserved delayed ref space of all pending block groups of the * transaction and remove them from the list */ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_block_group *block_group, *tmp; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info); list_del_init(&block_group->bg_list); } } static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) { /* * We use try_to_writeback_inodes_sb() here because if we used * btrfs_start_delalloc_roots we would deadlock with fs freeze. * Currently are holding the fs freeze lock, if we do an async flush * we'll do btrfs_join_transaction() and deadlock because we need to * wait for the fs freeze lock. Using the direct flushing we benefit * from already being in a transaction and our join_transaction doesn't * have to re-take the fs freeze lock. * * Note that try_to_writeback_inodes_sb() will only trigger writeback * if it can read lock sb->s_umount. It will always be able to lock it, * except when the filesystem is being unmounted or being frozen, but in * those cases sync_filesystem() is called, which results in calling * writeback_inodes_sb() while holding a write lock on sb->s_umount. * Note that we don't call writeback_inodes_sb() directly, because it * will emit a warning if sb->s_umount is not locked. */ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); return 0; } static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) { if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); } /* * Add a pending snapshot associated with the given transaction handle to the * respective handle. This must be called after the transaction commit started * and while holding fs_info->trans_lock. * This serves to guarantee a caller of btrfs_commit_transaction() that it can * safely free the pending snapshot pointer in case btrfs_commit_transaction() * returns an error. 
*/ static void add_pending_snapshot(struct btrfs_trans_handle *trans) { struct btrfs_transaction *cur_trans = trans->transaction; if (!trans->pending_snapshot) return; lockdep_assert_held(&trans->fs_info->trans_lock); ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP); list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots); } static void update_commit_stats(struct btrfs_fs_info *fs_info, ktime_t interval) { fs_info->commit_stats.commit_count++; fs_info->commit_stats.last_commit_dur = interval; fs_info->commit_stats.max_commit_dur = max_t(u64, fs_info->commit_stats.max_commit_dur, interval); fs_info->commit_stats.total_commit_dur += interval; } int btrfs_commit_transaction(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_transaction *cur_trans = trans->transaction; struct btrfs_transaction *prev_trans = NULL; int ret; ktime_t start_time; ktime_t interval; ASSERT(refcount_read(&trans->use_count) == 1); btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags); /* Stop the commit early if ->aborted is set */ if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; goto lockdep_trans_commit_start_release; } btrfs_trans_release_metadata(trans); trans->block_rsv = NULL; /* * We only want one transaction commit doing the flushing so we do not * waste a bunch of time on lock contention on the extent root node. */ if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING, &cur_trans->delayed_refs.flags)) { /* * Make a pass through all the delayed refs we have so far. * Any running threads may add more while we are here. */ ret = btrfs_run_delayed_refs(trans, 0); if (ret) goto lockdep_trans_commit_start_release; } btrfs_create_pending_block_groups(trans); if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) { int run_it = 0; /* this mutex is also taken before trying to set * block groups readonly. We need to make sure * that nobody has set a block group readonly * after a extents from that block group have been * allocated for cache files. btrfs_set_block_group_ro * will wait for the transaction to commit if it * finds BTRFS_TRANS_DIRTY_BG_RUN set. * * The BTRFS_TRANS_DIRTY_BG_RUN flag is also used to make sure * only one process starts all the block group IO. It wouldn't * hurt to have more than one go through, but there's no * real advantage to it either. 
*/ mutex_lock(&fs_info->ro_block_group_mutex); if (!test_and_set_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) run_it = 1; mutex_unlock(&fs_info->ro_block_group_mutex); if (run_it) { ret = btrfs_start_dirty_block_groups(trans); if (ret) goto lockdep_trans_commit_start_release; } } spin_lock(&fs_info->trans_lock); if (cur_trans->state >= TRANS_STATE_COMMIT_PREP) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; add_pending_snapshot(trans); spin_unlock(&fs_info->trans_lock); refcount_inc(&cur_trans->use_count); if (trans->in_fsync) want_state = TRANS_STATE_SUPER_COMMITTED; btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); ret = btrfs_end_transaction(trans); wait_for_commit(cur_trans, want_state); if (TRANS_ABORTED(cur_trans)) ret = cur_trans->aborted; btrfs_put_transaction(cur_trans); return ret; } cur_trans->state = TRANS_STATE_COMMIT_PREP; wake_up(&fs_info->transaction_blocked_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); if (cur_trans->list.prev != &fs_info->trans_list) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; if (trans->in_fsync) want_state = TRANS_STATE_SUPER_COMMITTED; prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (prev_trans->state < want_state) { refcount_inc(&prev_trans->use_count); spin_unlock(&fs_info->trans_lock); wait_for_commit(prev_trans, want_state); ret = READ_ONCE(prev_trans->aborted); btrfs_put_transaction(prev_trans); if (ret) goto lockdep_release; spin_lock(&fs_info->trans_lock); } } else { /* * The previous transaction was aborted and was already removed * from the list of transactions at fs_info->trans_list. So we * abort to prevent writing a new superblock that reflects a * corrupt state (pointing to trees with unwritten nodes/leafs). */ if (BTRFS_FS_ERROR(fs_info)) { spin_unlock(&fs_info->trans_lock); ret = -EROFS; goto lockdep_release; } } cur_trans->state = TRANS_STATE_COMMIT_START; wake_up(&fs_info->transaction_blocked_wait); spin_unlock(&fs_info->trans_lock); /* * Get the time spent on the work done by the commit thread and not * the time spent waiting on a previous commit */ start_time = ktime_get_ns(); extwriter_counter_dec(cur_trans, trans->type); ret = btrfs_start_delalloc_flush(fs_info); if (ret) goto lockdep_release; ret = btrfs_run_delayed_items(trans); if (ret) goto lockdep_release; /* * The thread has started/joined the transaction thus it holds the * lockdep map as a reader. It has to release it before acquiring the * lockdep map as a writer. */ btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); btrfs_might_wait_for_event(fs_info, btrfs_trans_num_extwriters); wait_event(cur_trans->writer_wait, extwriter_counter_read(cur_trans) == 0); /* some pending stuffs might be added after the previous flush. */ ret = btrfs_run_delayed_items(trans); if (ret) { btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); goto cleanup_transaction; } btrfs_wait_delalloc_flush(fs_info); /* * Wait for all ordered extents started by a fast fsync that joined this * transaction. Otherwise if this transaction commits before the ordered * extents complete we lose logged data after a power failure. */ btrfs_might_wait_for_event(fs_info, btrfs_trans_pending_ordered); wait_event(cur_trans->pending_wait, atomic_read(&cur_trans->pending_ordered) == 0); btrfs_scrub_pause(fs_info); /* * Ok now we need to make sure to block out any other joins while we * commit the transaction. 
We could have started a join before setting * COMMIT_DOING so make sure to wait for num_writers to == 1 again. */ spin_lock(&fs_info->trans_lock); add_pending_snapshot(trans); cur_trans->state = TRANS_STATE_COMMIT_DOING; spin_unlock(&fs_info->trans_lock); /* * The thread has started/joined the transaction thus it holds the * lockdep map as a reader. It has to release it before acquiring the * lockdep map as a writer. */ btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); btrfs_might_wait_for_event(fs_info, btrfs_trans_num_writers); wait_event(cur_trans->writer_wait, atomic_read(&cur_trans->num_writers) == 1); /* * Make lockdep happy by acquiring the state locks after * btrfs_trans_num_writers is released. If we acquired the state locks * before releasing the btrfs_trans_num_writers lock then lockdep would * complain because we did not follow the reverse order unlocking rule. */ btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); /* * We've started the commit, clear the flag in case we were triggered to * do an async commit but somebody else started before the transaction * kthread could do the work. */ clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags); if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); goto scrub_continue; } /* * the reloc mutex makes sure that we stop * the balancing code from coming in and moving * extents around in the middle of the commit */ mutex_lock(&fs_info->reloc_mutex); /* * We needn't worry about the delayed items because we will * deal with them in create_pending_snapshot(), which is the * core function of the snapshot creation. */ ret = create_pending_snapshots(trans); if (ret) goto unlock_reloc; /* * We insert the dir indexes of the snapshots and update the inode * of the snapshots' parents after the snapshot creation, so there * are some delayed items which are not dealt with. Now deal with * them. * * We needn't worry that this operation will corrupt the snapshots, * because all the tree which are snapshoted will be forced to COW * the nodes and leaves. */ ret = btrfs_run_delayed_items(trans); if (ret) goto unlock_reloc; ret = btrfs_run_delayed_refs(trans, U64_MAX); if (ret) goto unlock_reloc; /* * make sure none of the code above managed to slip in a * delayed item */ btrfs_assert_delayed_root_empty(fs_info); WARN_ON(cur_trans != trans->transaction); ret = commit_fs_roots(trans); if (ret) goto unlock_reloc; /* commit_fs_roots gets rid of all the tree log roots, it is now * safe to free the root of tree log roots */ btrfs_free_log_root_tree(trans, fs_info); /* * Since fs roots are all committed, we can get a quite accurate * new_roots. So let's do quota accounting. */ ret = btrfs_qgroup_account_extents(trans); if (ret < 0) goto unlock_reloc; ret = commit_cowonly_roots(trans); if (ret) goto unlock_reloc; /* * The tasks which save the space cache and inode cache may also * update ->aborted, check it. 
*/ if (TRANS_ABORTED(cur_trans)) { ret = cur_trans->aborted; goto unlock_reloc; } cur_trans = fs_info->running_transaction; btrfs_set_root_node(&fs_info->tree_root->root_item, fs_info->tree_root->node); list_add_tail(&fs_info->tree_root->dirty_list, &cur_trans->switch_commits); btrfs_set_root_node(&fs_info->chunk_root->root_item, fs_info->chunk_root->node); list_add_tail(&fs_info->chunk_root->dirty_list, &cur_trans->switch_commits); if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_set_root_node(&fs_info->block_group_root->root_item, fs_info->block_group_root->node); list_add_tail(&fs_info->block_group_root->dirty_list, &cur_trans->switch_commits); } switch_commit_roots(trans); ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->io_bgs)); update_super_roots(fs_info); btrfs_set_super_log_root(fs_info->super_copy, 0); btrfs_set_super_log_root_level(fs_info->super_copy, 0); memcpy(fs_info->super_for_commit, fs_info->super_copy, sizeof(*fs_info->super_copy)); btrfs_commit_device_sizes(cur_trans); clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags); clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags); btrfs_trans_release_chunk_metadata(trans); /* * Before changing the transaction state to TRANS_STATE_UNBLOCKED and * setting fs_info->running_transaction to NULL, lock tree_log_mutex to * make sure that before we commit our superblock, no other task can * start a new transaction and commit a log tree before we commit our * superblock. Anyone trying to commit a log tree locks this mutex before * writing its superblock. */ mutex_lock(&fs_info->tree_log_mutex); spin_lock(&fs_info->trans_lock); cur_trans->state = TRANS_STATE_UNBLOCKED; fs_info->running_transaction = NULL; spin_unlock(&fs_info->trans_lock); mutex_unlock(&fs_info->reloc_mutex); wake_up(&fs_info->transaction_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); /* If we have features changed, wake up the cleaner to update sysfs. */ if (test_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags) && fs_info->cleaner_kthread) wake_up_process(fs_info->cleaner_kthread); ret = btrfs_write_and_wait_transaction(trans); if (ret) { btrfs_handle_fs_error(fs_info, ret, "Error while writing out transaction"); mutex_unlock(&fs_info->tree_log_mutex); goto scrub_continue; } ret = write_all_supers(fs_info, 0); /* * the super is written, we can safely allow the tree-loggers * to go about their business */ mutex_unlock(&fs_info->tree_log_mutex); if (ret) goto scrub_continue; /* * We needn't acquire the lock here because there is no other task * which can change it. */ cur_trans->state = TRANS_STATE_SUPER_COMMITTED; wake_up(&cur_trans->commit_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); btrfs_finish_extent_commit(trans); if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags)) btrfs_clear_space_info_full(fs_info); btrfs_set_last_trans_committed(fs_info, cur_trans->transid); /* * We needn't acquire the lock here because there is no other task * which can change it. 
*/ cur_trans->state = TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); spin_lock(&fs_info->trans_lock); list_del_init(&cur_trans->list); spin_unlock(&fs_info->trans_lock); btrfs_put_transaction(cur_trans); btrfs_put_transaction(cur_trans); if (trans->type & __TRANS_FREEZABLE) sb_end_intwrite(fs_info->sb); trace_btrfs_transaction_commit(fs_info); interval = ktime_get_ns() - start_time; btrfs_scrub_continue(fs_info); if (current->journal_info == trans) current->journal_info = NULL; kmem_cache_free(btrfs_trans_handle_cachep, trans); update_commit_stats(fs_info, interval); return ret; unlock_reloc: mutex_unlock(&fs_info->reloc_mutex); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED); scrub_continue: btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMPLETED); btrfs_scrub_continue(fs_info); cleanup_transaction: btrfs_trans_release_metadata(trans); btrfs_cleanup_pending_block_groups(trans); btrfs_trans_release_chunk_metadata(trans); trans->block_rsv = NULL; btrfs_warn(fs_info, "Skipping commit of aborted transaction."); if (current->journal_info == trans) current->journal_info = NULL; cleanup_transaction(trans, ret); return ret; lockdep_release: btrfs_lockdep_release(fs_info, btrfs_trans_num_extwriters); btrfs_lockdep_release(fs_info, btrfs_trans_num_writers); goto cleanup_transaction; lockdep_trans_commit_start_release: btrfs_trans_state_lockdep_release(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); btrfs_end_transaction(trans); return ret; } /* * return < 0 if error * 0 if there are no more dead_roots at the time of call * 1 there are more to be processed, call me again * * The return value indicates there are certainly more snapshots to delete, but * if there comes a new one during processing, it may return 0. We don't mind, * because btrfs_commit_super will poke cleaner thread and it will process it a * few seconds later. */ int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info) { struct btrfs_root *root; int ret; spin_lock(&fs_info->trans_lock); if (list_empty(&fs_info->dead_roots)) { spin_unlock(&fs_info->trans_lock); return 0; } root = list_first_entry(&fs_info->dead_roots, struct btrfs_root, root_list); list_del_init(&root->root_list); spin_unlock(&fs_info->trans_lock); btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid); btrfs_kill_all_delayed_nodes(root); if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV) ret = btrfs_drop_snapshot(root, 0, 0); else ret = btrfs_drop_snapshot(root, 1, 0); btrfs_put_root(root); return (ret < 0) ? 0 : 1; } /* * We only mark the transaction aborted and then set the file system read-only. * This will prevent new transactions from starting or trying to join this * one. * * This means that error recovery at the call site is limited to freeing * any local memory allocations and passing the error code up without * further cleanup. The transaction should complete as it normally would * in the call path but will return -EIO. * * We'll complete the cleanup in btrfs_end_transaction and * btrfs_commit_transaction. 
*/ void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans, const char *function, unsigned int line, int error, bool first_hit) { struct btrfs_fs_info *fs_info = trans->fs_info; WRITE_ONCE(trans->aborted, error); WRITE_ONCE(trans->transaction->aborted, error); if (first_hit && error == -ENOSPC) btrfs_dump_space_info_for_trans_abort(fs_info); /* Wake up anybody who may be waiting on this transaction */ wake_up(&fs_info->transaction_wait); wake_up(&fs_info->transaction_blocked_wait); __btrfs_handle_fs_error(fs_info, function, line, error, NULL); } int __init btrfs_transaction_init(void) { btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle", sizeof(struct btrfs_trans_handle), 0, SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL); if (!btrfs_trans_handle_cachep) return -ENOMEM; return 0; } void __cold btrfs_transaction_exit(void) { kmem_cache_destroy(btrfs_trans_handle_cachep); } |
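/*
 * A minimal usage sketch (not part of the file above): how an internal
 * btrfs caller typically drives the transaction handle API implemented
 * here. It assumes the btrfs private headers and a valid root obtained
 * elsewhere; example_update_and_commit() is a hypothetical name and the
 * "modify trees" step is left as a placeholder.
 */
static int example_update_and_commit(struct btrfs_root *root, bool sync)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* Reserve metadata space for one tree item; may flush or fail. */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = 0;	/* ... modify trees through @trans here ... */
	if (ret) {
		/* Poison the whole transaction; cleanup happens on end/commit. */
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	if (sync)
		/* Block until the commit reaches TRANS_STATE_COMPLETED. */
		return btrfs_commit_transaction(trans);

	/* Just drop the handle; the transaction kthread commits later. */
	return btrfs_end_transaction(trans);
}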
// SPDX-License-Identifier: GPL-2.0 /* * message.c - synchronous message handling * * Released under the GPLv2 only. */ #include <linux/acpi.h> #include <linux/pci.h> /* for scatterlist macros */ #include <linux/usb.h> #include <linux/module.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/nls.h> #include <linux/device.h> #include <linux/scatterlist.h> #include <linux/usb/cdc.h> #include <linux/usb/quirks.h> #include <linux/usb/hcd.h> /* for usbcore internals */ #include <linux/usb/of.h> #include <asm/byteorder.h> #include "usb.h" static void cancel_async_set_config(struct usb_device *udev); struct api_context { struct completion done; int status; }; static void usb_api_blocking_completion(struct urb *urb) { struct api_context *ctx = urb->context; ctx->status = urb->status; complete(&ctx->done); } /* * Starts urb and waits for completion or timeout. Note that this call * is NOT interruptible. Many device driver i/o requests should be * interruptible and therefore these drivers should implement their * own interruptible routines. */ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) { struct api_context ctx; unsigned long expire; int retval; init_completion(&ctx.done); urb->context = &ctx; urb->actual_length = 0; retval = usb_submit_urb(urb, GFP_NOIO); if (unlikely(retval)) goto out; expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; if (!wait_for_completion_timeout(&ctx.done, expire)) { usb_kill_urb(urb); retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); dev_dbg(&urb->dev->dev, "%s timed out on ep%d%s len=%u/%u\n", current->comm, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ?
"in" : "out", urb->actual_length, urb->transfer_buffer_length); } else retval = ctx.status; out: if (actual_length) *actual_length = urb->actual_length; usb_free_urb(urb); return retval; } /*-------------------------------------------------------------------*/ /* returns status (negative) or length (positive) */ static int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); retv = usb_start_wait_urb(urb, timeout, &length); if (retv < 0) return retv; else return length; } /** * usb_control_msg - Builds a control urb, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: If successful, the number of bytes transferred. Otherwise, a negative * error number. */ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); kfree(dr); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg); /** * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is not * expected to fill in a response (i.e. a "send message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. 
If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer may reference a buffer on the stack, or anywhere else, * as it will not be modified at all. This does not have the restriction that * usb_control_msg() has where the data pointer must point to dynamically allocated * memory (i.e. memory that can be successfully DMAed to a device). * * Return: If successful, 0 is returned. Otherwise, a negative error number. */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_sndctrlpipe(dev, endpoint); int ret; u8 *data = NULL; if (size) { data = kmemdup(driver_data, size, memflags); if (!data) return -ENOMEM; } ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); kfree(data); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL_GPL(usb_control_msg_send); /** * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to be filled in by the message * @size: length in bytes of the data to be received * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is * expected to fill in a response (i.e. a "receive message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer may reference a buffer on the stack, or anywhere else * that can be successfully written to. This function does not have the * restriction that usb_control_msg() has where the data pointer must point to * dynamically allocated memory (i.e. memory that can be successfully DMAed to a * device). * * The "whole" message must be properly received from the device in order for * this function to be successful. If a device returns less than the expected * amount of data, then the function will fail. Do not use this for messages * where a variable amount of data might be returned. * * Return: If successful, 0 is returned. Otherwise, a negative error number.
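 *
 * A minimal usage sketch (illustrative only: the vendor request 0x01 and the
 * two-byte register it reads are hypothetical, not defined by this file):
 *
 *	static int example_read_reg(struct usb_device *udev, __le16 *reg)
 *	{
 *		return usb_control_msg_recv(udev, 0, 0x01,
 *				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 *				0, 0, reg, sizeof(*reg),
 *				USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
 *	}
 *
 * The "send" counterpart, usb_control_msg_send(), is used the same way with
 * USB_DIR_OUT in the request type and a const source buffer.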
*/ int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); int ret; u8 *data; if (!size || !driver_data) return -EINVAL; data = kmalloc(size, memflags); if (!data) return -ENOMEM; ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); if (ret < 0) goto exit; if (ret == size) { memcpy(driver_data, data, size); ret = 0; } else { ret = -EREMOTEIO; } exit: kfree(data); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg_recv); /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. */ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); } EXPORT_SYMBOL_GPL(usb_interrupt_msg); /** * usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for * interrupt endpoints. We will take the liberty of creating an interrupt URB * (with the default interval) if the target is an interrupt endpoint. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. 
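 *
 * A minimal usage sketch (illustrative only: endpoint 0x02 and the 5000 ms
 * timeout are assumptions; buf must be a kmalloc()ed, DMA-able buffer):
 *
 *	int actual = 0;
 *	int err = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 0x02),
 *			       buf, len, &actual, 5000);
 *	if (err)
 *		dev_err(&udev->dev, "bulk out failed: %d\n", err);
 *	else
 *		dev_dbg(&udev->dev, "wrote %d of %d bytes\n", actual, len);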
* */ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { struct urb *urb; struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(usb_dev, pipe); if (!ep || len < 0) return -EINVAL; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL, ep->desc.bInterval); } else usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); return usb_start_wait_urb(urb, timeout, actual_length); } EXPORT_SYMBOL_GPL(usb_bulk_msg); /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) { if (io->urbs) { while (io->entries--) usb_free_urb(io->urbs[io->entries]); kfree(io->urbs); io->urbs = NULL; } io->dev = NULL; } static void sg_complete(struct urb *urb) { unsigned long flags; struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock_irqsave(&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock_irqrestore(&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs[i]) continue; if (found) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs[i]) found = 1; } spin_lock_irqsave(&io->lock, flags); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } /** * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request * @io: request block being initialized. until usb_sg_wait() returns, * treat this as a pointer to an opaque block of memory, * @dev: the usb device that will send or receive the data * @pipe: endpoint "pipe" used to transfer the data * @period: polling rate for interrupt endpoints, in frames or * (for high speed endpoints) microframes; ignored for bulk * @sg: scatterlist entries * @nents: how many entries in the scatterlist * @length: how many bytes to send from the scatterlist, or zero to * send every byte identified in the list. 
* @mem_flags: SLAB_* flags affecting memory allocations in this call * * This initializes a scatter/gather request, allocating resources such as * I/O mappings and urb memory (except maybe memory used by USB controller * drivers). * * The request must be issued using usb_sg_wait(), which waits for the I/O to * complete (or to be canceled) and then cleans up all resources allocated by * usb_sg_init(). * * The request may be canceled with usb_sg_cancel(), either before or after * usb_sg_wait() is called. * * Return: Zero for success, else a negative errno value. */ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags) { int i; int urb_flags; int use_sg; if (!io || !dev || !sg || usb_pipecontrol(pipe) || usb_pipeisoc(pipe) || nents <= 0) return -EINVAL; spin_lock_init(&io->lock); io->dev = dev; io->pipe = pipe; if (dev->bus->sg_tablesize > 0) { use_sg = true; io->entries = 1; } else { use_sg = false; io->entries = nents; } /* initialize all the urbs we'll use */ io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags); if (!io->urbs) goto nomem; urb_flags = URB_NO_INTERRUPT; if (usb_pipein(pipe)) urb_flags |= URB_SHORT_NOT_OK; for_each_sg(sg, sg, io->entries, i) { struct urb *urb; unsigned len; urb = usb_alloc_urb(0, mem_flags); if (!urb) { io->entries = i; goto nomem; } io->urbs[i] = urb; urb->dev = NULL; urb->pipe = pipe; urb->interval = period; urb->transfer_flags = urb_flags; urb->complete = sg_complete; urb->context = io; urb->sg = sg; if (use_sg) { /* There is no single transfer buffer */ urb->transfer_buffer = NULL; urb->num_sgs = nents; /* A length of zero means transfer the whole sg list */ len = length; if (len == 0) { struct scatterlist *sg2; int j; for_each_sg(sg, sg2, nents, j) len += sg2->length; } } else { /* * Some systems can't use DMA; they use PIO instead. * For their sakes, transfer_buffer is set whenever * possible. */ if (!PageHighMem(sg_page(sg))) urb->transfer_buffer = sg_virt(sg); else urb->transfer_buffer = NULL; len = sg->length; if (length) { len = min_t(size_t, len, length); length -= len; if (length == 0) io->entries = i + 1; } } urb->transfer_buffer_length = len; } io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; /* transaction state */ io->count = io->entries; io->status = 0; io->bytes = 0; init_completion(&io->complete); return 0; nomem: sg_clean(io); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_sg_init); /** * usb_sg_wait - synchronously execute scatter/gather request * @io: request block handle, as initialized with usb_sg_init(). * some fields become accessible when this call returns. * * Context: task context, might sleep. * * This function blocks until the specified I/O operation completes. It * leverages the grouping of the related I/O requests to get good transfer * rates, by queueing the requests. At higher speeds, such queuing can * significantly improve USB throughput. * * There are three kinds of completion for this function. * * (1) success, where io->status is zero. The number of io->bytes * transferred is as requested. * (2) error, where io->status is a negative errno value. The number * of io->bytes transferred before the error is usually less * than requested, and can be nonzero. * (3) cancellation, a type of error with status -ECONNRESET that * is initiated by usb_sg_cancel(). * * When this function returns, all memory allocated through usb_sg_init() or * this call will have been freed. 
The request block parameter may still be * passed to usb_sg_cancel(), or it may be freed. It could also be * reinitialized and then reused. * * Data Transfer Rates: * * Bulk transfers are valid for full or high speed endpoints. * The best full speed data rate is 19 packets of 64 bytes each * per frame, or 1216 bytes per millisecond. * The best high speed data rate is 13 packets of 512 bytes each * per microframe, or 52 KBytes per millisecond. * * The reason to use interrupt transfers through this API would most likely * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond * could be transferred. That capability is less useful for low or full * speed interrupt endpoints, which allow at most one packet per millisecond, * of at most 8 or 64 bytes (respectively). * * It is not necessary to call this function to reserve bandwidth for devices * under an xHCI host controller, as the bandwidth is reserved when the * configuration or interface alt setting is selected. */ void usb_sg_wait(struct usb_sg_request *io) { int i; int entries = io->entries; /* queue the urbs. */ spin_lock_irq(&io->lock); i = 0; while (i < entries && !io->status) { int retval; io->urbs[i]->dev = io->dev; spin_unlock_irq(&io->lock); retval = usb_submit_urb(io->urbs[i], GFP_NOIO); switch (retval) { /* maybe retrying will recover */ case -ENXIO: /* hc didn't queue this one */ case -EAGAIN: case -ENOMEM: retval = 0; yield(); break; /* no error? continue immediately. * * NOTE: to work better with UHCI (4K I/O buffer may * need 3K of TDs) it may be good to limit how many * URBs are queued at once; N milliseconds? */ case 0: ++i; cpu_relax(); break; /* fail any uncompleted urbs */ default: io->urbs[i]->status = retval; dev_dbg(&io->dev->dev, "%s, submit --> %d\n", __func__, retval); usb_sg_cancel(io); } spin_lock_irq(&io->lock); if (retval && (io->status == 0 || io->status == -ECONNRESET)) io->status = retval; } io->count -= entries - i; if (io->count == 0) complete(&io->complete); spin_unlock_irq(&io->lock); /* OK, yes, this could be packaged as non-blocking. * So could the submit loop above ... but it's easier to * solve neither problem than to solve both! */ wait_for_completion(&io->complete); sg_clean(io); } EXPORT_SYMBOL_GPL(usb_sg_wait); /** * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait() * @io: request block, initialized with usb_sg_init() * * This stops a request after it has been started by usb_sg_wait(). * It can also prevent one initialized by usb_sg_init() from starting, * in which case that call just frees the resources allocated to the request.
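 *
 * A typical (illustrative) scatter/gather sequence; bulk IN endpoint 1 and
 * the driver-owned scatterlist sgl/nents are assumptions, not taken from
 * this file:
 *
 *	struct usb_sg_request io;
 *	int ret;
 *
 *	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
 *			  sgl, nents, 0, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	usb_sg_wait(&io);
 *	if (io.status)
 *		return io.status;
 *
 * usb_sg_wait() blocks until the whole list has transferred, failed, or been
 * canceled; io.bytes then holds the number of bytes actually transferred.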
*/ void usb_sg_cancel(struct usb_sg_request *io) { unsigned long flags; int i, retval; spin_lock_irqsave(&io->lock, flags); if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } spin_lock_irqsave(&io->lock, flags); io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); /*-------------------------------------------------------------------*/ /** * usb_get_descriptor - issues a generic GET_DESCRIPTOR request * @dev: the device whose descriptor is being retrieved * @type: the descriptor type (USB_DT_*) * @index: the number of the descriptor * @buf: where to put the descriptor * @size: how big is "buf"? * * Context: task context, might sleep. * * Gets a USB descriptor. Convenience functions exist to simplify * getting some types of descriptors. Use * usb_get_string() or usb_string() for USB_DT_STRING. * Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG) * are part of the device structure. * In addition to a number of USB-standard descriptors, some * devices also use class-specific or vendor-specific descriptors. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. */ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; memset(buf, 0, size); /* Make sure we parse really received data */ for (i = 0; i < 3; ++i) { /* retry on length 0 or error; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (type << 8) + index, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (result <= 0 && result != -ETIMEDOUT) continue; if (result > 1 && ((u8 *)buf)[1] != type) { result = -ENODATA; continue; } break; } return result; } EXPORT_SYMBOL_GPL(usb_get_descriptor); /** * usb_get_string - gets a string descriptor * @dev: the device whose string descriptor is being retrieved * @langid: code for language chosen (from string descriptor zero) * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character, * in little-endian byte order). * The usb_string() function will often be a convenient way to turn * these strings into kernel-printable form. * * Strings may be referenced in device, configuration, interface, or other * descriptors, and could also be used in vendor-specific ways. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
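 *
 * For reference, this issues the same GET_DESCRIPTOR control request as an
 * explicit usb_control_msg() call with bmRequestType = USB_DIR_IN,
 * bRequest = USB_REQ_GET_DESCRIPTOR, wValue = (USB_DT_STRING << 8) + index
 * and wIndex = langid, which is exactly what the function body below does.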
*/ static int usb_get_string(struct usb_device *dev, unsigned short langid, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; for (i = 0; i < 3; ++i) { /* retry on length 0 or stall; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (USB_DT_STRING << 8) + index, langid, buf, size, USB_CTRL_GET_TIMEOUT); if (result == 0 || result == -EPIPE) continue; if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) { result = -ENODATA; continue; } break; } return result; } static void usb_try_string_workarounds(unsigned char *buf, int *length) { int newlength, oldlength = *length; for (newlength = 2; newlength + 1 < oldlength; newlength += 2) if (!isprint(buf[newlength]) || buf[newlength + 1]) break; if (newlength > 2) { buf[0] = newlength; *length = newlength; } } static int usb_string_sub(struct usb_device *dev, unsigned int langid, unsigned int index, unsigned char *buf) { int rc; /* Try to read the string descriptor by asking for the maximum * possible number of bytes */ if (dev->quirks & USB_QUIRK_STRING_FETCH_255) rc = -EIO; else rc = usb_get_string(dev, langid, index, buf, 255); /* If that failed try to read the descriptor length, then * ask for just that many bytes */ if (rc < 2) { rc = usb_get_string(dev, langid, index, buf, 2); if (rc == 2) rc = usb_get_string(dev, langid, index, buf, buf[0]); } if (rc >= 2) { if (!buf[0] && !buf[1]) usb_try_string_workarounds(buf, &rc); /* There might be extra junk at the end of the descriptor */ if (buf[0] < rc) rc = buf[0]; rc = rc - (rc & 1); /* force a multiple of two */ } if (rc < 2) rc = (rc < 0 ? rc : -EINVAL); return rc; } static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf) { int err; if (dev->have_langid) return 0; if (dev->string_langid < 0) return -EPIPE; err = usb_string_sub(dev, 0, 0, tbuf); /* If the string was reported but is malformed, default to english * (0x0409) */ if (err == -ENODATA || (err > 0 && err < 4)) { dev->string_langid = 0x0409; dev->have_langid = 1; dev_err(&dev->dev, "language id specifier not provided by device, defaulting to English\n"); return 0; } /* In case of all other errors, we assume the device is not able to * deal with strings at all. Set string_langid to -1 in order to * prevent any string to be retrieved from the device */ if (err < 0) { dev_info(&dev->dev, "string descriptor 0 read error: %d\n", err); dev->string_langid = -1; return -EPIPE; } /* always use the first langid listed */ dev->string_langid = tbuf[2] | (tbuf[3] << 8); dev->have_langid = 1; dev_dbg(&dev->dev, "default language 0x%04x\n", dev->string_langid); return 0; } /** * usb_string - returns UTF-8 version of a string descriptor * @dev: the device whose string descriptor is being retrieved * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * This converts the UTF-16LE encoded strings returned by devices, from * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones * that are more usable in most kernel contexts. Note that this function * chooses strings in the first language supported by the device. * * This call is synchronous, and may not be used in an interrupt context. * * Return: length of the string (>= 0) or usb_control_msg status (< 0). 
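 *
 * A minimal usage sketch (illustrative only; the 64-byte buffer size is an
 * arbitrary choice):
 *
 *	char product[64];
 *
 *	if (usb_string(udev, udev->descriptor.iProduct,
 *		       product, sizeof(product)) > 0)
 *		dev_info(&udev->dev, "product: %s\n", product);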
*/ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) { unsigned char *tbuf; int err; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; if (index <= 0 || index >= 256) return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; err = usb_get_langid(dev, tbuf); if (err < 0) goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) goto errout; size--; /* leave room for trailing NULL char in output buffer */ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2, UTF16_LITTLE_ENDIAN, buf, size); buf[err] = 0; if (tbuf[1] != USB_DT_STRING) dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf); errout: kfree(tbuf); return err; } EXPORT_SYMBOL_GPL(usb_string); /* one UTF-8-encoded 16-bit character has at most three bytes */ #define MAX_USB_STRING_SIZE (127 * 3 + 1) /** * usb_cache_string - read a string descriptor and cache it for later use * @udev: the device whose string descriptor is being read * @index: the descriptor index * * Return: A pointer to a kmalloc'ed buffer containing the descriptor string, * or %NULL if the index is 0 or the string could not be read. */ char *usb_cache_string(struct usb_device *udev, int index) { char *buf; char *smallbuf = NULL; int len; if (index <= 0) return NULL; buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); if (len > 0) { smallbuf = kmalloc(++len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); } kfree(buf); } return smallbuf; } EXPORT_SYMBOL_GPL(usb_cache_string); /* * usb_get_device_descriptor - read the device descriptor * @udev: the device whose device descriptor should be read * * Context: task context, might sleep. * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * * Returns: a pointer to a dynamically allocated usb_device_descriptor * structure (which the caller must deallocate), or an ERR_PTR value. */ struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) return ERR_PTR(-ENOMEM); ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); if (ret == sizeof(*desc)) return desc; if (ret >= 0) ret = -EMSGSIZE; kfree(desc); return ERR_PTR(ret); } /* * usb_set_isoch_delay - informs the device of the packet transmit delay * @dev: the device whose delay is to be informed * Context: task context, might sleep * * Since this is an optional request, we don't bother if it fails. */ int usb_set_isoch_delay(struct usb_device *dev) { /* skip hub devices */ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; /* skip non-SS/non-SSP devices */ if (dev->speed < USB_SPEED_SUPER) return 0; return usb_control_msg_send(dev, 0, USB_REQ_SET_ISOCH_DELAY, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, dev->hub_delay, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); } /** * usb_get_status - issues a GET_STATUS call * @dev: the device whose status is being checked * @recip: USB_RECIP_*; for device, interface, or endpoint * @type: USB_STATUS_TYPE_*; for standard or PTM status types * @target: zero (for device), else interface or endpoint number * @data: pointer to two bytes of bitmap data * * Context: task context, might sleep. 
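 *
 * A minimal usage sketch (illustrative only; endpoint address 0x81 is an
 * assumption) that tests whether a bulk IN endpoint reports Halt:
 *
 *	u16 status;
 *
 *	if (!usb_get_status(udev, USB_RECIP_ENDPOINT, USB_STATUS_TYPE_STANDARD,
 *			    0x81, &status) &&
 *	    (status & (1 << USB_ENDPOINT_HALT)))
 *		usb_clear_halt(udev, usb_rcvbulkpipe(udev, 1));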
* * Returns device, interface, or endpoint status. Normally only of * interest to see if the device is self powered, or has enabled the * remote wakeup facility; or whether a bulk or interrupt endpoint * is halted ("stalled"). * * Bits in these status bitmaps are set using the SET_FEATURE request, * and cleared using the CLEAR_FEATURE request. The usb_clear_halt() * function should be used to clear halt ("stall") status. * * This call is synchronous, and may not be used in an interrupt context. * * Returns 0 and the status value in *@data (in host byte order) on success, * or else the status code from the underlying usb_control_msg() call. */ int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data) { int ret; void *status; int length; switch (type) { case USB_STATUS_TYPE_STANDARD: length = 2; break; case USB_STATUS_TYPE_PTM: if (recip != USB_RECIP_DEVICE) return -EINVAL; length = 4; break; default: return -EINVAL; } status = kmalloc(length, GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD, target, status, length, USB_CTRL_GET_TIMEOUT); switch (ret) { case 4: if (type != USB_STATUS_TYPE_PTM) { ret = -EIO; break; } *(u32 *) data = le32_to_cpu(*(__le32 *) status); ret = 0; break; case 2: if (type != USB_STATUS_TYPE_STANDARD) { ret = -EIO; break; } *(u16 *) data = le16_to_cpu(*(__le16 *) status); ret = 0; break; default: ret = -EIO; } kfree(status); return ret; } EXPORT_SYMBOL_GPL(usb_get_status); /** * usb_clear_halt - tells device to clear endpoint halt/stall condition * @dev: device whose endpoint is halted * @pipe: endpoint "pipe" being cleared * * Context: task context, might sleep. * * This is used to clear halt conditions for bulk and interrupt endpoints, * as reported by URB completion status. Endpoints that are halted are * sometimes referred to as being "stalled". Such endpoints are unable * to transmit or receive data until the halt status is cleared. Any URBs * queued for such an endpoint should normally be unlinked by the driver * before clearing the halt condition, as described in sections 5.7.5 * and 5.8.5 of the USB 2.0 spec. * * Note that control and isochronous endpoints don't halt, although control * endpoints report "protocol stall" (for unsupported requests) using the * same status code used to report a true stall. * * This call is synchronous, and may not be used in an interrupt context. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_clear_halt(struct usb_device *dev, int pipe) { int result; int endp = usb_pipeendpoint(pipe); if (usb_pipein(pipe)) endp |= USB_DIR_IN; /* we don't care if it wasn't halted first. in fact some devices * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! */ result = usb_control_msg_send(dev, 0, USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT, endp, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); /* don't un-halt or force to DATA0 except on success */ if (result) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying * the clear "took", so some devices could lock up if you check... * such as the Hagiwara FlashGate DUAL. So we won't bother. * * NOTE: make sure the logic here doesn't diverge much from * the copy in usb-storage, for as long as we need two copies. 
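 *
 * After a successful CLEAR_FEATURE(ENDPOINT_HALT), usb_reset_endpoint()
 * below re-initializes the host-side endpoint state (data toggle /
 * sequence number), so the next transfer starts from DATA0.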
*/ usb_reset_endpoint(dev, endp); return 0; } EXPORT_SYMBOL_GPL(usb_clear_halt); static int create_intf_ep_devs(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; int i; if (intf->ep_devs_created || intf->unregistering) return 0; for (i = 0; i < alt->desc.bNumEndpoints; ++i) (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev); intf->ep_devs_created = 1; return 0; } static void remove_intf_ep_devs(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; int i; if (!intf->ep_devs_created) return; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_remove_ep_devs(&alt->endpoint[i]); intf->ep_devs_created = 0; } /** * usb_disable_endpoint -- Disable an endpoint by address * @dev: the device whose endpoint is being disabled * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables the endpoint for URB submission and nukes all pending URBs. * If @reset_hardware is set then also deallocates hcd/hardware state * for the endpoint. */ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (!dev) return; if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { ep->enabled = 0; usb_hcd_flush_endpoint(dev, ep); if (reset_hardware) usb_hcd_disable_endpoint(dev, ep); } } /** * usb_reset_endpoint - Reset an endpoint's state. * @dev: the device whose endpoint is to be reset * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * * Resets any host-side endpoint state such as the toggle bit, * sequence number or current window. */ void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (usb_endpoint_out(epaddr)) ep = dev->ep_out[epnum]; else ep = dev->ep_in[epnum]; if (ep) usb_hcd_reset_endpoint(dev, ep); } EXPORT_SYMBOL_GPL(usb_reset_endpoint); /** * usb_disable_interface -- Disable all endpoints for an interface * @dev: the device whose interface is being disabled * @intf: pointer to the interface descriptor * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables all the endpoints for the interface's current altsetting. */ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { usb_disable_endpoint(dev, alt->endpoint[i].desc.bEndpointAddress, reset_hardware); } } /* * usb_disable_device_endpoints -- Disable all endpoints for a device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. */ static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) { struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i; if (hcd->driver->check_bandwidth) { /* First pass: Cancel URBs, leave endpoint pointers intact. 
*/ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, false); usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); mutex_unlock(hcd->bandwidth_mutex); } /* Second pass: remove endpoint pointers */ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, true); usb_disable_endpoint(dev, i + USB_DIR_IN, true); } } /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. * * Disables all the device's endpoints, potentially including endpoint 0. * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) */ if (dev->actconfig) { /* * FIXME: In order to avoid self-deadlock involving the * bandwidth_mutex, we have to mark all the interfaces * before unregistering any of them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) dev->actconfig->interface[i]->unregistering = 1; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { struct usb_interface *interface; /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; if (!device_is_registered(&interface->dev)) continue; dev_dbg(&dev->dev, "unregistering interface %s\n", dev_name(&interface->dev)); remove_intf_ep_devs(interface); device_del(&interface->dev); } /* Now that the interfaces are unbound, nobody should * try to access them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { put_device(&dev->actconfig->interface[i]->dev); dev->actconfig->interface[i] = NULL; } usb_disable_usb2_hardware_lpm(dev); usb_unlocked_disable_lpm(dev); usb_disable_ltm(dev); dev->actconfig = NULL; if (dev->state == USB_STATE_CONFIGURED) usb_set_device_state(dev, USB_STATE_ADDRESS); } dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); usb_disable_device_endpoints(dev, skip_ep0); } /** * usb_enable_endpoint - Enable an endpoint for USB communications * @dev: the device whose interface is being enabled * @ep: the endpoint * @reset_ep: flag to reset the endpoint state * * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers. * For control endpoints, both the input and output sides are handled. */ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, bool reset_ep) { int epnum = usb_endpoint_num(&ep->desc); int is_out = usb_endpoint_dir_out(&ep->desc); int is_control = usb_endpoint_xfer_control(&ep->desc); if (reset_ep) usb_hcd_reset_endpoint(dev, ep); if (is_out || is_control) dev->ep_out[epnum] = ep; if (!is_out || is_control) dev->ep_in[epnum] = ep; ep->enabled = 1; } /** * usb_enable_interface - Enable all the endpoints for an interface * @dev: the device whose interface is being enabled * @intf: pointer to the interface descriptor * @reset_eps: flag to reset the endpoints' state * * Enables all the endpoints for the interface's current altsetting. 
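 *
 * Within this file, usb_set_interface(), usb_reset_configuration() and
 * usb_set_configuration() call this helper after installing a new
 * altsetting or configuration.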
*/ void usb_enable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_eps) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps); } /** * usb_set_interface - Makes a particular alternate setting be current * @dev: the device whose interface is being updated * @interface: the interface being updated * @alternate: the setting being chosen. * * Context: task context, might sleep. * * This is used to enable data transfers on interfaces that may not * be enabled by default. Not all devices support such configurability. * Only the driver bound to an interface may change its setting. * * Within any given configuration, each interface may have several * alternative settings. These are often used to control levels of * bandwidth consumption. For example, the default setting for a high * speed interrupt endpoint may not send more than 64 bytes per microframe, * while interrupt transfers of up to 3KBytes per microframe are legal. * Also, isochronous endpoints may never be part of an * interface's default setting. To access such bandwidth, alternate * interface settings must be made current. * * Note that in the Linux USB subsystem, bandwidth associated with * an endpoint in a given alternate setting is not reserved until an URB * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * * xHCI reserves bandwidth and configures the alternate setting in * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting * may be disabled. Drivers cannot rely on any particular alternate * setting being in effect after a failure. * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed * (perhaps forced by unlinking). * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_set_interface(struct usb_device *dev, int interface, int alternate) { struct usb_interface *iface; struct usb_host_interface *alt; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i, ret, manual = 0; unsigned int epaddr; unsigned int pipe; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; iface = usb_ifnum_to_if(dev, interface); if (!iface) { dev_dbg(&dev->dev, "selecting invalid interface %d\n", interface); return -EINVAL; } if (iface->unregistering) return -ENODEV; alt = usb_altnum_to_altsetting(iface, alternate); if (!alt) { dev_warn(&dev->dev, "selecting invalid altsetting %d\n", alternate); return -EINVAL; } /* * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, * including freeing dropped endpoint ring buffers. * Make sure the interface endpoints are flushed before that */ usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new alt setting is installed, * so that the xHCI driver can recalculate the U1/U2 timeouts. 
*/ if (usb_disable_lpm(dev)) { dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* Changing alt-setting also frees any allocated streams */ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++) iface->cur_altsetting->endpoint[i].streams = 0; ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt); if (ret < 0) { dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n", alternate); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } if (dev->quirks & USB_QUIRK_NO_SET_INTF) ret = -EPIPE; else ret = usb_control_msg_send(dev, 0, USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE, alternate, interface, NULL, 0, 5000, GFP_NOIO); /* 9.4.10 says devices don't need this and are free to STALL the * request if the interface only has one alternate setting. */ if (ret == -EPIPE && iface->num_altsetting == 1) { dev_dbg(&dev->dev, "manual set_interface for iface %d, alt %d\n", interface, alternate); manual = 1; } else if (ret) { /* Re-instate the old alt setting */ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } mutex_unlock(hcd->bandwidth_mutex); /* FIXME drivers shouldn't need to replicate/bugfix the logic here * when they implement async or easily-killable versions of this or * other "should-be-internal" functions (like clear_halt). * should hcd+usbcore postprocess control requests? */ /* prevent submissions using previous endpoint settings */ if (iface->cur_altsetting != alt) { remove_intf_ep_devs(iface); usb_remove_sysfs_intf_files(iface); } usb_disable_interface(dev, iface, true); iface->cur_altsetting = alt; /* Now that the interface is installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* If the interface only has one altsetting and the device didn't * accept the request, we attempt to carry out the equivalent action * by manually clearing the HALT feature for each endpoint in the * new altsetting. */ if (manual) { for (i = 0; i < alt->desc.bNumEndpoints; i++) { epaddr = alt->endpoint[i].desc.bEndpointAddress; pipe = __create_pipe(dev, USB_ENDPOINT_NUMBER_MASK & epaddr) | (usb_endpoint_out(epaddr) ? USB_DIR_OUT : USB_DIR_IN); usb_clear_halt(dev, pipe); } } /* 9.1.1.5: reset toggles for all endpoints in the new altsetting * * Note: * Despite EP0 is always present in all interfaces/AS, the list of * endpoints from the descriptor does not contain EP0. Due to its * omnipresence one might expect EP0 being considered "affected" by * any SetInterface request and hence assume toggles need to be reset. * However, EP0 toggles are re-synced for every individual transfer * during the SETUP stage - hence EP0 toggles are "don't care" here. * (Likewise, EP0 never "halts" on well designed devices.) */ usb_enable_interface(dev, iface, true); if (device_is_registered(&iface->dev)) { usb_create_sysfs_intf_files(iface); create_intf_ep_devs(iface); } return 0; } EXPORT_SYMBOL_GPL(usb_set_interface); /** * usb_reset_configuration - lightweight device reset * @dev: the device whose configuration is being reset * * This issues a standard SET_CONFIGURATION request to the device using * the current configuration. The effect is to reset most USB-related * state in the device, including interface altsettings (reset to zero), * endpoint halts (cleared), and endpoint state (only for bulk and interrupt * endpoints). Other usbcore state is unchanged, including bindings of * usb device drivers to interfaces. 
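 *
 * A minimal usage sketch (illustrative only), run with the device lock held
 * as required below:
 *
 *	retval = usb_reset_configuration(udev);
 *	if (retval < 0)
 *		dev_err(&udev->dev, "reset_configuration failed: %d\n", retval);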
* * Because this affects multiple interfaces, avoid using this with composite * (multi-interface) devices. Instead, the driver for each interface may * use usb_set_interface() on the interfaces it claims. Be careful though; * some devices don't support the SET_INTERFACE request, and others won't * reset all the interface state (notably endpoint state). Resetting the whole * configuration would affect other drivers' interfaces. * * The caller must own the device lock. * * Return: Zero on success, else a negative error code. * * If this routine fails the device will probably be in an unusable state * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { int i, retval; struct usb_host_config *config; struct usb_hcd *hcd = bus_to_hcd(dev->bus); if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; /* caller must have locked the device and must own * the usb bus readlock (so driver bindings are stable); * calls during probe() are fine */ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the configuration is reset, so * that the xHCI driver can recalculate the U1/U2 timeouts. */ if (usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (retval) { usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) { remove_intf_ep_devs(intf); usb_remove_sysfs_intf_files(intf); } intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); if (device_is_registered(&intf->dev)) { usb_create_sysfs_intf_files(intf); create_intf_ep_devs(intf); } } /* Now that the interfaces are installed, re-enable LPM. 
*/ usb_unlocked_enable_lpm(dev); return 0; } EXPORT_SYMBOL_GPL(usb_reset_configuration); static void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = altsetting_to_usb_interface_cache(intf->altsetting); kref_put(&intfc->ref, usb_release_interface_cache); usb_put_dev(interface_to_usbdev(intf)); of_node_put(dev->of_node); kfree(intf); } /* * usb_deauthorize_interface - deauthorize an USB interface * * @intf: USB interface structure */ void usb_deauthorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; device_lock(dev->parent); if (intf->authorized) { device_lock(dev); intf->authorized = 0; device_unlock(dev); usb_forced_unbind_intf(intf); } device_unlock(dev->parent); } /* * usb_authorize_interface - authorize an USB interface * * @intf: USB interface structure */ void usb_authorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; if (!intf->authorized) { device_lock(dev); intf->authorized = 1; /* authorize interface */ device_unlock(dev); } } static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_device *usb_dev; const struct usb_interface *intf; const struct usb_host_interface *alt; intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); alt = intf->cur_altsetting; if (add_uevent_var(env, "INTERFACE=%d/%d/%d", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=usb:" "v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice), usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol, alt->desc.bInterfaceNumber)) return -ENOMEM; return 0; } struct device_type usb_if_device_type = { .name = "usb_interface", .release = usb_release_interface, .uevent = usb_if_uevent, }; static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev, struct usb_host_config *config, u8 inum) { struct usb_interface_assoc_descriptor *retval = NULL; struct usb_interface_assoc_descriptor *intf_assoc; int first_intf; int last_intf; int i; for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) { intf_assoc = config->intf_assoc[i]; if (intf_assoc->bInterfaceCount == 0) continue; first_intf = intf_assoc->bFirstInterface; last_intf = first_intf + (intf_assoc->bInterfaceCount - 1); if (inum >= first_intf && inum <= last_intf) { if (!retval) retval = intf_assoc; else dev_err(&dev->dev, "Interface #%d referenced" " by multiple IADs\n", inum); } } return retval; } /* * Internal function to queue a device reset * See usb_queue_reset_device() for more details */ static void __usb_queue_reset_device(struct work_struct *ws) { int rc; struct usb_interface *iface = container_of(ws, struct usb_interface, reset_ws); struct usb_device *udev = interface_to_usbdev(iface); rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */ } /* * Internal function to set the wireless_status sysfs attribute * See usb_set_wireless_status() for more details */ static void __usb_wireless_status_intf(struct work_struct *ws) { struct usb_interface *iface = container_of(ws, struct 
usb_interface, wireless_status_work); device_lock(iface->dev.parent); if (iface->sysfs_files_created) usb_update_wireless_status_attr(iface); device_unlock(iface->dev.parent); usb_put_intf(iface); /* Undo _get_ in usb_set_wireless_status() */ } /** * usb_set_wireless_status - sets the wireless_status struct member * @iface: the interface to modify * @status: the new wireless status * * Set the wireless_status struct member to the new value, and emit * sysfs changes as necessary. * * Returns: 0 on success, -EALREADY if already set. */ int usb_set_wireless_status(struct usb_interface *iface, enum usb_wireless_status status) { if (iface->wireless_status == status) return -EALREADY; usb_get_intf(iface); iface->wireless_status = status; schedule_work(&iface->wireless_status_work); return 0; } EXPORT_SYMBOL_GPL(usb_set_wireless_status); /* * usb_set_configuration - Makes a particular device setting be current * @dev: the device whose configuration is being updated * @configuration: the configuration being chosen. * * Context: task context, might sleep. Caller holds device lock. * * This is used to enable non-default device modes. Not all devices * use this kind of configurability; many devices only have one * configuration. * * @configuration is the value of the configuration to be installed. * According to the USB spec (e.g. section 9.1.1.5), configuration values * must be non-zero; a value of zero indicates that the device in * unconfigured. However some devices erroneously use 0 as one of their * configuration values. To help manage such devices, this routine will * accept @configuration = -1 as indicating the device should be put in * an unconfigured state. * * USB device configurations may affect Linux interoperability, * power consumption and the functionality available. For example, * the default configuration is limited to using 100mA of bus power, * so that when certain device functionality requires more power, * and the device is bus powered, that functionality should be in some * non-default device configuration. Other device modes may also be * reflected as configuration options, such as whether two ISDN * channels are available independently; and choosing between open * standard device protocols (like CDC) or proprietary ones. * * Note that a non-authorized device (dev->authorized == 0) will only * be put in unconfigured mode. * * Note that USB has an additional level of device configurability, * associated with interfaces. That configurability is accessed using * usb_set_interface(). * * This call is synchronous. The calling context must be able to sleep, * must own the device lock, and must not hold the driver model's USB * bus mutex; usb interface driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. 
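 *
 * A minimal usage sketch (illustrative only) that installs the first listed
 * configuration, assuming the caller already owns the device lock:
 *
 *	ret = usb_set_configuration(udev,
 *			udev->config[0].desc.bConfigurationValue);
 *	if (ret < 0)
 *		dev_err(&udev->dev, "set_configuration failed: %d\n", ret);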
*/ int usb_set_configuration(struct usb_device *dev, int configuration) { int i, ret; struct usb_host_config *cp = NULL; struct usb_interface **new_interfaces = NULL; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int n, nintf; if (dev->authorized == 0 || configuration == -1) configuration = 0; else { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (dev->config[i].desc.bConfigurationValue == configuration) { cp = &dev->config[i]; break; } } } if ((!cp && configuration != 0)) return -EINVAL; /* The USB spec says configuration 0 means unconfigured. * But if a device includes a configuration numbered 0, * we will accept it as a correctly configured state. * Use -1 if you really want to unconfigure the device. */ if (cp && configuration == 0) dev_warn(&dev->dev, "config 0 descriptor??\n"); /* Allocate memory for new interfaces before doing anything else, * so that if we run out then nothing will have changed. */ n = nintf = 0; if (cp) { nintf = cp->desc.bNumInterfaces; new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces), GFP_NOIO); if (!new_interfaces) return -ENOMEM; for (; n < nintf; ++n) { new_interfaces[n] = kzalloc( sizeof(struct usb_interface), GFP_NOIO); if (!new_interfaces[n]) { ret = -ENOMEM; free_interfaces: while (--n >= 0) kfree(new_interfaces[n]); kfree(new_interfaces); return ret; } } i = dev->bus_mA - usb_get_max_power(dev, cp); if (i < 0) dev_warn(&dev->dev, "new config #%d exceeds power " "limit by %dmA\n", configuration, -i); } /* Wake up the device so we can send it the Set-Config request */ ret = usb_autoresume_device(dev); if (ret) goto free_interfaces; /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ /* Get rid of pending async Set-Config requests for this device */ cancel_async_set_config(dev); /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new configuration is * installed, so that the xHCI driver can recalculate the U1/U2 * timeouts. */ if (dev->actconfig && usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); ret = -ENOMEM; goto free_interfaces; } ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { if (dev->actconfig) usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); usb_autosuspend_device(dev); goto free_interfaces; } /* * Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ for (i = 0; i < nintf; ++i) { struct usb_interface_cache *intfc; struct usb_interface *intf; struct usb_host_interface *alt; u8 ifnum; cp->interface[i] = intf = new_interfaces[i]; intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; intf->authorized = !!HCD_INTF_AUTHORIZED(hcd); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. 
*/ if (!alt) alt = &intf->altsetting[0]; ifnum = alt->desc.bInterfaceNumber; intf->intf_assoc = find_iad(dev, cp, ifnum); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; if (usb_of_has_combined_node(dev)) { device_set_of_node_from_dev(&intf->dev, &dev->dev); } else { intf->dev.of_node = usb_of_get_interface_node(dev, configuration, ifnum); } ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev)); intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; intf->dev.groups = usb_interface_groups; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); INIT_WORK(&intf->wireless_status_work, __usb_wireless_status_intf); intf->minor = -1; device_initialize(&intf->dev); pm_runtime_no_callbacks(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, ifnum); usb_get_dev(dev); } kfree(new_interfaces); ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (ret && cp) { /* * All the old state is gone, so what else can we do? * The device is probably useless now anyway. */ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); for (i = 0; i < nintf; ++i) { usb_disable_interface(dev, cp->interface[i], true); put_device(&cp->interface[i]->dev); cp->interface[i] = NULL; } cp = NULL; } dev->actconfig = cp; mutex_unlock(hcd->bandwidth_mutex); if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); /* Leave LPM disabled while the device is unconfigured. */ usb_autosuspend_device(dev); return ret; } usb_set_device_state(dev, USB_STATE_CONFIGURED); if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); /* Now that the interfaces are installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* Enable LTM if it was turned off by usb_disable_device. */ usb_enable_ltm(dev); /* Now that all the interfaces are set up, register them * to trigger binding of drivers to interfaces. probe() * routines may install different altsettings and may * claim() any interfaces not yet bound. Many class drivers * need that: CDC, audio, video, etc. */ for (i = 0; i < nintf; ++i) { struct usb_interface *intf = cp->interface[i]; if (intf->dev.of_node && !of_device_is_available(intf->dev.of_node)) { dev_info(&dev->dev, "skipping disabled interface %d\n", intf->cur_altsetting->desc.bInterfaceNumber); continue; } dev_dbg(&dev->dev, "adding %s (config #%d, interface %d)\n", dev_name(&intf->dev), configuration, intf->cur_altsetting->desc.bInterfaceNumber); device_enable_async_suspend(&intf->dev); ret = device_add(&intf->dev); if (ret != 0) { dev_err(&dev->dev, "device_add(%s) --> %d\n", dev_name(&intf->dev), ret); continue; } create_intf_ep_devs(intf); } usb_autosuspend_device(dev); return 0; } EXPORT_SYMBOL_GPL(usb_set_configuration); static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; /* Worker routine for usb_driver_set_configuration() */ static void driver_set_config_work(struct work_struct *work) { struct set_config_request *req = container_of(work, struct set_config_request, work); struct usb_device *udev = req->udev; usb_lock_device(udev); spin_lock(&set_config_lock); list_del(&req->node); spin_unlock(&set_config_lock); if (req->config >= -1) /* Is req still valid? 
*/ usb_set_configuration(udev, req->config); usb_unlock_device(udev); usb_put_dev(udev); kfree(req); } /* Cancel pending Set-Config requests for a device whose configuration * was just changed */ static void cancel_async_set_config(struct usb_device *udev) { struct set_config_request *req; spin_lock(&set_config_lock); list_for_each_entry(req, &set_config_list, node) { if (req->udev == udev) req->config = -999; /* Mark as cancelled */ } spin_unlock(&set_config_lock); } /** * usb_driver_set_configuration - Provide a way for drivers to change device configurations * @udev: the device whose configuration is being updated * @config: the configuration being chosen. * Context: In process context, must be able to sleep * * Device interface drivers are not allowed to change device configurations. * This is because changing configurations will destroy the interface the * driver is bound to and create new ones; it would be like a floppy-disk * driver telling the computer to replace the floppy-disk drive with a * tape drive! * * Still, in certain specialized circumstances the need may arise. This * routine gets around the normal restrictions by using a work thread to * submit the change-config request. * * Return: 0 if the request was successfully queued, error code otherwise. * The caller has no way to know whether the queued request will eventually * succeed. */ int usb_driver_set_configuration(struct usb_device *udev, int config) { struct set_config_request *req; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->udev = udev; req->config = config; INIT_WORK(&req->work, driver_set_config_work); spin_lock(&set_config_lock); list_add(&req->node, &set_config_list); spin_unlock(&set_config_lock); usb_get_dev(udev); schedule_work(&req->work); return 0; } EXPORT_SYMBOL_GPL(usb_driver_set_configuration); /** * cdc_parse_cdc_header - parse the extra headers present in CDC devices * @hdr: the place to put the results of the parsing * @intf: the interface for which parsing is requested * @buffer: pointer to the extra headers to be parsed * @buflen: length of the extra headers * * This evaluates the extra headers present in CDC devices which * bind the interfaces for data and control and provide details * about the capabilities of the device. 
* * Return: number of descriptors parsed or -EINVAL * if the header is contradictory beyond salvage */ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen) { /* duplicates are ignored */ struct usb_cdc_union_desc *union_header = NULL; /* duplicates are not tolerated */ struct usb_cdc_header_desc *header = NULL; struct usb_cdc_ether_desc *ether = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; struct usb_cdc_mdlm_desc *desc = NULL; unsigned int elength; int cnt = 0; memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); hdr->phonet_magic_present = false; while (buflen > 0) { elength = buffer[0]; if (!elength) { dev_err(&intf->dev, "skipping garbage byte\n"); elength = 1; goto next_desc; } if ((buflen < elength) || (elength < 3)) { dev_err(&intf->dev, "invalid descriptor buffer length\n"); break; } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ if (elength < sizeof(struct usb_cdc_union_desc)) goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); goto next_desc; } union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: if (elength < sizeof(struct usb_cdc_country_functional_desc)) goto next_desc; hdr->usb_cdc_country_functional_desc = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: if (elength != sizeof(struct usb_cdc_header_desc)) goto next_desc; if (header) return -EINVAL; header = (struct usb_cdc_header_desc *)buffer; break; case USB_CDC_ACM_TYPE: if (elength < sizeof(struct usb_cdc_acm_descriptor)) goto next_desc; hdr->usb_cdc_acm_descriptor = (struct usb_cdc_acm_descriptor *)buffer; break; case USB_CDC_ETHERNET_TYPE: if (elength != sizeof(struct usb_cdc_ether_desc)) goto next_desc; if (ether) return -EINVAL; ether = (struct usb_cdc_ether_desc *)buffer; break; case USB_CDC_CALL_MANAGEMENT_TYPE: if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) goto next_desc; hdr->usb_cdc_call_mgmt_descriptor = (struct usb_cdc_call_mgmt_descriptor *)buffer; break; case USB_CDC_DMM_TYPE: if (elength < sizeof(struct usb_cdc_dmm_desc)) goto next_desc; hdr->usb_cdc_dmm_desc = (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; detail = (struct usb_cdc_mdlm_detail_desc *)buffer; break; case USB_CDC_NCM_TYPE: if (elength < sizeof(struct usb_cdc_ncm_desc)) goto next_desc; hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; break; case USB_CDC_MBIM_TYPE: if (elength < sizeof(struct usb_cdc_mbim_desc)) goto next_desc; hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; break; case USB_CDC_MBIM_EXTENDED_TYPE: if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) break; hdr->usb_cdc_mbim_extended_desc = (struct usb_cdc_mbim_extended_desc *)buffer; break; case CDC_PHONET_MAGIC_NUMBER: hdr->phonet_magic_present = true; break; default: /* * there are LOTS more CDC descriptors that * could legitimately be found here. 
*/ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n", buffer[2], elength); goto next_desc; } cnt++; next_desc: buflen -= elength; buffer += elength; } hdr->usb_cdc_union_desc = union_header; hdr->usb_cdc_header_desc = header; hdr->usb_cdc_mdlm_detail_desc = detail; hdr->usb_cdc_mdlm_desc = desc; hdr->usb_cdc_ether_desc = ether; return cnt; } EXPORT_SYMBOL(cdc_parse_cdc_header);
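/*
 * Illustrative sketch (not from an in-tree driver): how a class driver's
 * probe() routine might use cdc_parse_cdc_header() above.  The name
 * example_cdc_probe() is invented for this sketch; the parsed-header fields
 * and the extra-descriptor buffer in struct usb_host_interface are the real
 * interfaces documented above.
 */
static int example_cdc_probe(struct usb_interface *intf,
			     const struct usb_device_id *id)
{
	struct usb_cdc_parsed_header hdr;
	int ret;

	/* Class-specific descriptors follow the interface descriptor. */
	ret = cdc_parse_cdc_header(&hdr, intf,
				   intf->cur_altsetting->extra,
				   intf->cur_altsetting->extralen);
	if (ret < 0)
		return ret;

	/* Most CDC drivers need at least the union descriptor. */
	if (!hdr.usb_cdc_union_desc)
		return -ENODEV;

	dev_dbg(&intf->dev, "control intf %u, data intf %u\n",
		hdr.usb_cdc_union_desc->bMasterInterface0,
		hdr.usb_cdc_union_desc->bSlaveInterface0);

	return 0;
}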
12 12 12 6 4 12 7 11 14 4 4 3 3 2 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 | // SPDX-License-Identifier: GPL-2.0-only /* * Cryptographic API * * Michael MIC (IEEE 802.11i/TKIP) keyed digest * * Copyright (c) 2004 Jouni Malinen <j@w1.fi> */ #include <crypto/internal/hash.h> #include <asm/unaligned.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> struct michael_mic_ctx { u32 l, r; }; struct michael_mic_desc_ctx { __le32 pending; size_t pending_len; u32 l, r; }; static inline u32 xswap(u32 val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } #define michael_block(l, r) \ do { \ r ^= rol32(l, 17); \ l += r; \ r ^= xswap(l); \ l += r; \ r ^= rol32(l, 3); \ l += r; \ r ^= ror32(l, 2); \ l += r; \ } while (0) static int michael_init(struct shash_desc *desc) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); struct michael_mic_ctx *ctx = crypto_shash_ctx(desc->tfm); mctx->pending_len = 0; mctx->l = ctx->l; mctx->r = ctx->r; return 0; } static int michael_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); if (mctx->pending_len) { int flen = 4 - mctx->pending_len; if (flen > len) flen = len; memcpy((u8 *)&mctx->pending + mctx->pending_len, data, flen); mctx->pending_len += flen; data += flen; len -= flen; if (mctx->pending_len < 4) return 0; mctx->l ^= le32_to_cpu(mctx->pending); michael_block(mctx->l, mctx->r); mctx->pending_len = 0; } while (len >= 4) { mctx->l ^= get_unaligned_le32(data); michael_block(mctx->l, mctx->r); data += 4; len -= 4; } if (len > 0) { mctx->pending_len = len; memcpy(&mctx->pending, data, len); } return 0; } static int michael_final(struct shash_desc *desc, u8 *out) { struct michael_mic_desc_ctx *mctx = shash_desc_ctx(desc); u8 *data = (u8 *)&mctx->pending; /* Last block and padding (0x5a, 4..7 x 0) */ switch (mctx->pending_len) { case 0: mctx->l ^= 0x5a; break; case 1: mctx->l ^= data[0] | 0x5a00; break; case 2: mctx->l ^= data[0] | (data[1] << 8) | 0x5a0000; break; case 3: mctx->l ^= data[0] | (data[1] << 8) | (data[2] << 16) | 0x5a000000; break; } michael_block(mctx->l, mctx->r); /* l ^= 0; */ michael_block(mctx->l, mctx->r); put_unaligned_le32(mctx->l, out); put_unaligned_le32(mctx->r, out + 4); return 0; } static int michael_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct michael_mic_ctx *mctx = crypto_shash_ctx(tfm); if (keylen != 8) return -EINVAL; mctx->l = get_unaligned_le32(key); mctx->r = get_unaligned_le32(key + 4); return 0; } static struct shash_alg alg = { .digestsize = 8, .setkey = michael_setkey, .init = michael_init, .update = michael_update, .final = michael_final, .descsize = sizeof(struct michael_mic_desc_ctx), .base = { .cra_name = "michael_mic", .cra_driver_name = "michael_mic-generic", .cra_blocksize = 8, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, } }; static int __init 
michael_mic_init(void) { return crypto_register_shash(&alg); } static void __exit michael_mic_exit(void) { crypto_unregister_shash(&alg); } subsys_initcall(michael_mic_init); module_exit(michael_mic_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Michael MIC"); MODULE_AUTHOR("Jouni Malinen <j@w1.fi>"); MODULE_ALIAS_CRYPTO("michael_mic");
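/*
 * Illustrative sketch (not part of this file): computing a Michael MIC
 * through the generic shash API that the algorithm above registers.
 * example_michael_mic() is a made-up helper name; the crypto_shash_*()
 * calls and the 8-byte key/digest sizes match the definition above.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int example_michael_mic(const u8 key[8], const u8 *data,
			       unsigned int len, u8 mic[8])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 8);	/* michael_setkey(): keylen == 8 */
	if (err)
		goto out_free_tfm;

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;

	/* init + update + final in one call; writes the 8-byte digest. */
	err = crypto_shash_digest(desc, data, len, mic);

	kfree_sensitive(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return err;
}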
// SPDX-License-Identifier: GPL-2.0-only /* * inode.c - part of tracefs, a pseudo file system for activating tracing * * Based on debugfs by: Greg Kroah-Hartman <greg@kroah.com> * * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com> * * tracefs is the file system that is used by the tracing infrastructure.
*/ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/tracefs.h> #include <linux/fsnotify.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> #include "internal.h" #define TRACEFS_DEFAULT_MODE 0700 static struct kmem_cache *tracefs_inode_cachep __ro_after_init; static struct vfsmount *tracefs_mount; static int tracefs_mount_count; static bool tracefs_registered; static struct inode *tracefs_alloc_inode(struct super_block *sb) { struct tracefs_inode *ti; ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL); if (!ti) return NULL; return &ti->vfs_inode; } static void tracefs_free_inode(struct inode *inode) { kmem_cache_free(tracefs_inode_cachep, get_tracefs(inode)); } static ssize_t default_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t default_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static const struct file_operations tracefs_file_operations = { .read = default_read_file, .write = default_write_file, .open = simple_open, .llseek = noop_llseek, }; static struct tracefs_dir_ops { int (*mkdir)(const char *name); int (*rmdir)(const char *name); } tracefs_ops __ro_after_init; static char *get_dname(struct dentry *dentry) { const char *dname; char *name; int len = dentry->d_name.len; dname = dentry->d_name.name; name = kmalloc(len + 1, GFP_KERNEL); if (!name) return NULL; memcpy(name, dname, len); name[len] = 0; return name; } static int tracefs_syscall_mkdir(struct mnt_idmap *idmap, struct inode *inode, struct dentry *dentry, umode_t mode) { struct tracefs_inode *ti; char *name; int ret; name = get_dname(dentry); if (!name) return -ENOMEM; /* * This is a new directory that does not take the default of * the rootfs. It becomes the default permissions for all the * files and directories underneath it. */ ti = get_tracefs(inode); ti->flags |= TRACEFS_INSTANCE_INODE; ti->private = inode; /* * The mkdir call can call the generic functions that create * the files within the tracefs system. It is up to the individual * mkdir routine to handle races. */ inode_unlock(inode); ret = tracefs_ops.mkdir(name); inode_lock(inode); kfree(name); return ret; } static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry) { char *name; int ret; name = get_dname(dentry); if (!name) return -ENOMEM; /* * The rmdir call can call the generic functions that create * the files within the tracefs system. It is up to the individual * rmdir routine to handle races. * This time we need to unlock not only the parent (inode) but * also the directory that is being deleted. */ inode_unlock(inode); inode_unlock(d_inode(dentry)); ret = tracefs_ops.rmdir(name); inode_lock_nested(inode, I_MUTEX_PARENT); inode_lock(d_inode(dentry)); kfree(name); return ret; } static void set_tracefs_inode_owner(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); struct inode *root_inode = ti->private; /* * If this inode has never been referenced, then update * the permissions to the superblock. 
*/ if (!(ti->flags & TRACEFS_UID_PERM_SET)) inode->i_uid = root_inode->i_uid; if (!(ti->flags & TRACEFS_GID_PERM_SET)) inode->i_gid = root_inode->i_gid; } static int tracefs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { set_tracefs_inode_owner(inode); return generic_permission(idmap, inode, mask); } static int tracefs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct inode *inode = d_backing_inode(path->dentry); set_tracefs_inode_owner(inode); generic_fillattr(idmap, request_mask, inode, stat); return 0; } static int tracefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; struct inode *inode = d_inode(dentry); struct tracefs_inode *ti = get_tracefs(inode); if (ia_valid & ATTR_UID) ti->flags |= TRACEFS_UID_PERM_SET; if (ia_valid & ATTR_GID) ti->flags |= TRACEFS_GID_PERM_SET; return simple_setattr(idmap, dentry, attr); } static const struct inode_operations tracefs_instance_dir_inode_operations = { .lookup = simple_lookup, .mkdir = tracefs_syscall_mkdir, .rmdir = tracefs_syscall_rmdir, .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; static const struct inode_operations tracefs_dir_inode_operations = { .lookup = simple_lookup, .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; static const struct inode_operations tracefs_file_inode_operations = { .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; struct inode *tracefs_get_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); simple_inode_init_ts(inode); } return inode; } struct tracefs_mount_opts { kuid_t uid; kgid_t gid; umode_t mode; /* Opt_* bitfield. */ unsigned int opts; }; enum { Opt_uid, Opt_gid, Opt_mode, Opt_err }; static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, {Opt_err, NULL} }; struct tracefs_fs_info { struct tracefs_mount_opts mount_opts; }; static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts) { substring_t args[MAX_OPT_ARGS]; int option; int token; kuid_t uid; kgid_t gid; char *p; opts->opts = 0; opts->mode = TRACEFS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { if (!*p) continue; token = match_token(p, tokens, args); switch (token) { case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; uid = make_kuid(current_user_ns(), option); if (!uid_valid(uid)) return -EINVAL; opts->uid = uid; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; gid = make_kgid(current_user_ns(), option); if (!gid_valid(gid)) return -EINVAL; opts->gid = gid; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; opts->mode = option & S_IALLUGO; break; /* * We might like to report bad mount options here; * but traditionally tracefs has ignored all mount options */ } opts->opts |= BIT(token); } return 0; } static int tracefs_apply_options(struct super_block *sb, bool remount) { struct tracefs_fs_info *fsi = sb->s_fs_info; struct inode *inode = d_inode(sb->s_root); struct tracefs_mount_opts *opts = &fsi->mount_opts; umode_t tmp_mode; /* * On remount, only reset mode/uid/gid if they were provided as mount * options. 
*/ if (!remount || opts->opts & BIT(Opt_mode)) { tmp_mode = READ_ONCE(inode->i_mode) & ~S_IALLUGO; tmp_mode |= opts->mode; WRITE_ONCE(inode->i_mode, tmp_mode); } if (!remount || opts->opts & BIT(Opt_uid)) inode->i_uid = opts->uid; if (!remount || opts->opts & BIT(Opt_gid)) inode->i_gid = opts->gid; return 0; } static int tracefs_remount(struct super_block *sb, int *flags, char *data) { int err; struct tracefs_fs_info *fsi = sb->s_fs_info; sync_filesystem(sb); err = tracefs_parse_options(data, &fsi->mount_opts); if (err) goto fail; tracefs_apply_options(sb, true); fail: return err; } static int tracefs_show_options(struct seq_file *m, struct dentry *root) { struct tracefs_fs_info *fsi = root->d_sb->s_fs_info; struct tracefs_mount_opts *opts = &fsi->mount_opts; if (!uid_eq(opts->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, opts->uid)); if (!gid_eq(opts->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); if (opts->mode != TRACEFS_DEFAULT_MODE) seq_printf(m, ",mode=%o", opts->mode); return 0; } static const struct super_operations tracefs_super_operations = { .alloc_inode = tracefs_alloc_inode, .free_inode = tracefs_free_inode, .drop_inode = generic_delete_inode, .statfs = simple_statfs, .remount_fs = tracefs_remount, .show_options = tracefs_show_options, }; /* * It would be cleaner if eventfs had its own dentry ops. * * Note that d_revalidate is called potentially under RCU, * so it can't take the eventfs mutex etc. It's fine - if * we open a file just as it's marked dead, things will * still work just fine, and just see the old stale case. */ static void tracefs_d_release(struct dentry *dentry) { if (dentry->d_fsdata) eventfs_d_release(dentry); } static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct eventfs_inode *ei = dentry->d_fsdata; return !(ei && ei->is_freed); } static const struct dentry_operations tracefs_dentry_operations = { .d_revalidate = tracefs_d_revalidate, .d_release = tracefs_d_release, }; static int trace_fill_super(struct super_block *sb, void *data, int silent) { static const struct tree_descr trace_files[] = {{""}}; struct tracefs_fs_info *fsi; int err; fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL); sb->s_fs_info = fsi; if (!fsi) { err = -ENOMEM; goto fail; } err = tracefs_parse_options(data, &fsi->mount_opts); if (err) goto fail; err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files); if (err) goto fail; sb->s_op = &tracefs_super_operations; sb->s_d_op = &tracefs_dentry_operations; tracefs_apply_options(sb, false); return 0; fail: kfree(fsi); sb->s_fs_info = NULL; return err; } static struct dentry *trace_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, trace_fill_super); } static struct file_system_type trace_fs_type = { .owner = THIS_MODULE, .name = "tracefs", .mount = trace_mount, .kill_sb = kill_litter_super, }; MODULE_ALIAS_FS("tracefs"); struct dentry *tracefs_start_creating(const char *name, struct dentry *parent) { struct dentry *dentry; int error; pr_debug("tracefs: creating file '%s'\n",name); error = simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count); if (error) return ERR_PTR(error); /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super * block. A pointer to that is in the struct vfsmount that we * have around. 
*/ if (!parent) parent = tracefs_mount->mnt_root; inode_lock(d_inode(parent)); if (unlikely(IS_DEADDIR(d_inode(parent)))) dentry = ERR_PTR(-ENOENT); else dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(dentry) && d_inode(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); } if (IS_ERR(dentry)) { inode_unlock(d_inode(parent)); simple_release_fs(&tracefs_mount, &tracefs_mount_count); } return dentry; } struct dentry *tracefs_failed_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); dput(dentry); simple_release_fs(&tracefs_mount, &tracefs_mount_count); return NULL; } struct dentry *tracefs_end_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); return dentry; } /* Find the inode that this will use for default */ static struct inode *instance_inode(struct dentry *parent, struct inode *inode) { struct tracefs_inode *ti; /* If parent is NULL then use root inode */ if (!parent) return d_inode(inode->i_sb->s_root); /* Find the inode that is flagged as an instance or the root inode */ while (!IS_ROOT(parent)) { ti = get_tracefs(d_inode(parent)); if (ti->flags & TRACEFS_INSTANCE_INODE) break; parent = parent->d_parent; } return d_inode(parent); } /** * tracefs_create_file - create a file in the tracefs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the tracefs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for tracefs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the tracefs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the tracefs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If tracefs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *tracefs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct tracefs_inode *ti; struct dentry *dentry; struct inode *inode; if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; if (!(mode & S_IFMT)) mode |= S_IFREG; BUG_ON(!S_ISREG(mode)); dentry = tracefs_start_creating(name, parent); if (IS_ERR(dentry)) return NULL; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) return tracefs_failed_creating(dentry); ti = get_tracefs(inode); ti->private = instance_inode(parent, inode); inode->i_mode = mode; inode->i_op = &tracefs_file_inode_operations; inode->i_fop = fops ? 
fops : &tracefs_file_operations; inode->i_private = data; inode->i_uid = d_inode(dentry->d_parent)->i_uid; inode->i_gid = d_inode(dentry->d_parent)->i_gid; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); return tracefs_end_creating(dentry); } static struct dentry *__create_dir(const char *name, struct dentry *parent, const struct inode_operations *ops) { struct tracefs_inode *ti; struct dentry *dentry = tracefs_start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) return tracefs_failed_creating(dentry); /* Do not set bits for OTH */ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; inode->i_op = ops; inode->i_fop = &simple_dir_operations; inode->i_uid = d_inode(dentry->d_parent)->i_uid; inode->i_gid = d_inode(dentry->d_parent)->i_gid; ti = get_tracefs(inode); ti->private = instance_inode(parent, inode); /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return tracefs_end_creating(dentry); } /** * tracefs_create_dir - create a directory in the tracefs filesystem * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * directory will be created in the root of the tracefs filesystem. * * This function creates a directory in tracefs with the given name. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the tracefs_remove() function when the file is * to be removed. If an error occurs, %NULL will be returned. * * If tracing is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) { if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; return __create_dir(name, parent, &tracefs_dir_inode_operations); } /** * tracefs_create_instance_dir - create the tracing instances directory * @name: The name of the instances directory to create * @parent: The parent directory that the instances directory will exist * @mkdir: The function to call when a mkdir is performed. * @rmdir: The function to call when a rmdir is performed. * * Only one instances directory is allowed. * * The instances directory is special as it allows for mkdir and rmdir * to be done by userspace. When a mkdir or rmdir is performed, the inode * locks are released and the methods passed in (@mkdir and @rmdir) are * called without locks and with the name of the directory being created * within the instances directory. * * Returns the dentry of the instances directory. */ __init struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent, int (*mkdir)(const char *name), int (*rmdir)(const char *name)) { struct dentry *dentry; /* Only allow one instance of the instances directory. 
*/ if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir)) return NULL; dentry = __create_dir(name, parent, &tracefs_instance_dir_inode_operations); if (!dentry) return NULL; tracefs_ops.mkdir = mkdir; tracefs_ops.rmdir = rmdir; return dentry; } static void remove_one(struct dentry *victim) { simple_release_fs(&tracefs_mount, &tracefs_mount_count); } /** * tracefs_remove - recursively removes a directory * @dentry: a pointer to a the dentry of the directory to be removed. * * This function recursively removes a directory tree in tracefs that * was previously created with a call to another tracefs function * (like tracefs_create_file() or variants thereof.) */ void tracefs_remove(struct dentry *dentry) { if (IS_ERR_OR_NULL(dentry)) return; simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count); simple_recursive_removal(dentry, remove_one); simple_release_fs(&tracefs_mount, &tracefs_mount_count); } /** * tracefs_initialized - Tells whether tracefs has been registered */ bool tracefs_initialized(void) { return tracefs_registered; } static void init_once(void *foo) { struct tracefs_inode *ti = (struct tracefs_inode *) foo; /* inode_init_once() calls memset() on the vfs_inode portion */ inode_init_once(&ti->vfs_inode); /* Zero out the rest */ memset_after(ti, 0, vfs_inode); } static int __init tracefs_init(void) { int retval; tracefs_inode_cachep = kmem_cache_create("tracefs_inode_cache", sizeof(struct tracefs_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD| SLAB_ACCOUNT), init_once); if (!tracefs_inode_cachep) return -ENOMEM; retval = sysfs_create_mount_point(kernel_kobj, "tracing"); if (retval) return -EINVAL; retval = register_filesystem(&trace_fs_type); if (!retval) tracefs_registered = true; return retval; } core_initcall(tracefs_init); |
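/*
 * Illustrative sketch (not part of this file): how a tracer module could use
 * the API above to expose a control file under tracefs.  The example_* names,
 * the "example" directory and the "trigger" file are invented for this
 * sketch; tracefs_create_dir(), tracefs_create_file() and tracefs_remove()
 * are the interfaces defined above.
 */
#include <linux/uaccess.h>

static struct dentry *example_dir;

static ssize_t example_trigger_write(struct file *filp, const char __user *ubuf,
				     size_t count, loff_t *ppos)
{
	char buf[16] = "";

	if (copy_from_user(buf, ubuf, min(count, sizeof(buf) - 1)))
		return -EFAULT;
	pr_info("tracefs example trigger: %s\n", buf);
	return count;
}

static const struct file_operations example_trigger_fops = {
	.write	= example_trigger_write,
	.open	= simple_open,
	.llseek	= noop_llseek,
};

static int __init example_tracefs_user_init(void)
{
	example_dir = tracefs_create_dir("example", NULL);
	if (!example_dir)
		return -ENOMEM;

	if (!tracefs_create_file("trigger", 0200, example_dir, NULL,
				 &example_trigger_fops)) {
		tracefs_remove(example_dir);	/* removal is recursive */
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_tracefs_user_exit(void)
{
	tracefs_remove(example_dir);
}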
/* SPDX-License-Identifier: GPL-2.0 */ /* * Shared Memory Communications over RDMA (SMC-R) and RoCE * * Definitions for SMC Connections, Link Groups and Links * * Copyright IBM Corp. 2016 * * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com> */ #ifndef _SMC_CORE_H #define _SMC_CORE_H #include <linux/atomic.h> #include <linux/smc.h> #include <linux/pci.h> #include <rdma/ib_verbs.h> #include <net/genetlink.h> #include <net/smc.h> #include "smc.h" #include "smc_ib.h" #include "smc_clc.h" #define SMC_RMBS_PER_LGR_MAX 255 /* max. # of RMBs per link group */ #define SMC_CONN_PER_LGR_MIN 16 /* min. # of connections per link group */ #define SMC_CONN_PER_LGR_MAX 255 /* max. # of connections per link group, * also is the default value for SMC-R v1 and v2.0 */ #define SMC_CONN_PER_LGR_PREFER 255 /* Preferred connections per link group used for * SMC-R v2.1 and later negotiation, vendors or * distributions may modify it to a value between * 16-255 as needed.
*/ struct smc_lgr_list { /* list of link group definition */ struct list_head list; spinlock_t lock; /* protects list of link groups */ u32 num; /* unique link group number */ }; enum smc_lgr_role { /* possible roles of a link group */ SMC_CLNT, /* client */ SMC_SERV /* server */ }; enum smc_link_state { /* possible states of a link */ SMC_LNK_UNUSED, /* link is unused */ SMC_LNK_INACTIVE, /* link is inactive */ SMC_LNK_ACTIVATING, /* link is being activated */ SMC_LNK_ACTIVE, /* link is active */ }; #define SMC_WR_BUF_SIZE 48 /* size of work request buffer */ #define SMC_WR_BUF_V2_SIZE 8192 /* size of v2 work request buffer */ struct smc_wr_buf { u8 raw[SMC_WR_BUF_SIZE]; }; struct smc_wr_v2_buf { u8 raw[SMC_WR_BUF_V2_SIZE]; }; #define SMC_WR_REG_MR_WAIT_TIME (5 * HZ)/* wait time for ib_wr_reg_mr result */ enum smc_wr_reg_state { POSTED, /* ib_wr_reg_mr request posted */ CONFIRMED, /* ib_wr_reg_mr response: successful */ FAILED /* ib_wr_reg_mr response: failure */ }; struct smc_rdma_sge { /* sges for RDMA writes */ struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; }; #define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per * message send */ struct smc_rdma_sges { /* sges per message send */ struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; }; struct smc_rdma_wr { /* work requests per message * send */ struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; }; #define SMC_LGR_ID_SIZE 4 struct smc_link { struct smc_ib_device *smcibdev; /* ib-device */ u8 ibport; /* port - values 1 | 2 */ struct ib_pd *roce_pd; /* IB protection domain, * unique for every RoCE QP */ struct ib_qp *roce_qp; /* IB queue pair */ struct ib_qp_attr qp_attr; /* IB queue pair attributes */ struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ struct ib_sge *wr_tx_sges; /* WR send gather meta data */ struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ struct completion *wr_tx_compl; /* WR send CQE completion */ /* above four vectors have wr_tx_cnt elements and use the same index */ struct ib_send_wr *wr_tx_v2_ib; /* WR send v2 meta data */ struct ib_sge *wr_tx_v2_sge; /* WR send v2 gather meta data*/ struct smc_wr_tx_pend *wr_tx_v2_pend; /* WR send v2 waiting for CQE */ dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ dma_addr_t wr_tx_v2_dma_addr; /* DMA address of v2 tx buf*/ atomic_long_t wr_tx_id; /* seq # of last sent WR */ unsigned long *wr_tx_mask; /* bit mask of used indexes */ u32 wr_tx_cnt; /* number of WR send buffers */ wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */ struct { struct percpu_ref wr_tx_refs; } ____cacheline_aligned_in_smp; struct completion tx_ref_comp; struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */ struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */ struct ib_sge *wr_rx_sges; /* WR recv scatter meta data */ /* above three vectors have wr_rx_cnt elements and use the same index */ dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */ dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/ u64 wr_rx_id; /* seq # of last recv WR */ u64 wr_rx_id_compl; /* seq # of last completed WR */ u32 wr_rx_cnt; /* number of WR recv buffers */ unsigned long wr_rx_tstamp; /* jiffies when last buf rx */ wait_queue_head_t wr_rx_empty_wait; /* wait for RQ empty */ struct ib_reg_wr wr_reg; /* WR register memory region */ wait_queue_head_t wr_reg_wait; /* wait for wr_reg 
result */ struct { struct percpu_ref wr_reg_refs; } ____cacheline_aligned_in_smp; struct completion reg_ref_comp; enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/ u8 sgid_index; /* gid index for vlan id */ u32 peer_qpn; /* QP number of peer */ enum ib_mtu path_mtu; /* used mtu */ enum ib_mtu peer_mtu; /* mtu size of peer */ u32 psn_initial; /* QP tx initial packet seqno */ u32 peer_psn; /* QP rx initial packet seqno */ u8 peer_mac[ETH_ALEN]; /* = gid[8:10||13:15] */ u8 peer_gid[SMC_GID_SIZE]; /* gid of peer*/ u8 link_id; /* unique # within link group */ u8 link_uid[SMC_LGR_ID_SIZE]; /* unique lnk id */ u8 peer_link_uid[SMC_LGR_ID_SIZE]; /* peer uid */ u8 link_idx; /* index in lgr link array */ u8 link_is_asym; /* is link asymmetric? */ u8 clearing : 1; /* link is being cleared */ refcount_t refcnt; /* link reference count */ struct smc_link_group *lgr; /* parent link group */ struct work_struct link_down_wrk; /* wrk to bring link down */ char ibname[IB_DEVICE_NAME_MAX]; /* ib device name */ int ndev_ifidx; /* network device ifindex */ enum smc_link_state state; /* state of link */ struct delayed_work llc_testlink_wrk; /* testlink worker */ struct completion llc_testlink_resp; /* wait for rx of testlink */ int llc_testlink_time; /* testlink interval */ atomic_t conn_cnt; /* connections on this link */ }; /* For now we just allow one parallel link per link group. The SMC protocol * allows more (up to 8). */ #define SMC_LINKS_PER_LGR_MAX 3 #define SMC_SINGLE_LINK 0 #define SMC_LINKS_ADD_LNK_MIN 1 /* min. # of links per link group */ #define SMC_LINKS_ADD_LNK_MAX 2 /* max. # of links per link group, also is the * default value for smc-r v1.0 and v2.0 */ #define SMC_LINKS_PER_LGR_MAX_PREFER 2 /* Preferred max links per link group used for * SMC-R v2.1 and later negotiation, vendors or * distrubutions may modify it to a value between * 1-2 as needed. */ /* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */ struct smc_buf_desc { struct list_head list; void *cpu_addr; /* virtual address of buffer */ struct page *pages; int len; /* length of buffer */ u32 used; /* currently used / unused */ union { struct { /* SMC-R */ struct sg_table sgt[SMC_LINKS_PER_LGR_MAX]; /* virtual buffer */ struct ib_mr *mr[SMC_LINKS_PER_LGR_MAX]; /* memory region: for rmb and * vzalloced sndbuf * incl. rkey provided to peer * and lkey provided to local */ u32 order; /* allocation order */ u8 is_conf_rkey; /* confirm_rkey done */ u8 is_reg_mr[SMC_LINKS_PER_LGR_MAX]; /* mem region registered */ u8 is_map_ib[SMC_LINKS_PER_LGR_MAX]; /* mem region mapped to lnk */ u8 is_dma_need_sync; u8 is_reg_err; /* buffer registration err */ u8 is_vm; /* virtually contiguous */ }; struct { /* SMC-D */ unsigned short sba_idx; /* SBA index number */ u64 token; /* DMB token number */ dma_addr_t dma_addr; /* DMA address */ }; }; }; struct smc_rtoken { /* address/key of remote RMB */ u64 dma_addr; u32 rkey; }; #define SMC_BUF_MIN_SIZE 16384 /* minimum size of an RMB */ #define SMC_RMBE_SIZES 16 /* number of distinct RMBE sizes */ /* theoretically, the RFC states that largest size would be 512K, * i.e. 
compressed 5 and thus 6 sizes (0..5), despite * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15) */ struct smcd_dev; enum smc_lgr_type { /* redundancy state of lgr */ SMC_LGR_NONE, /* no active links, lgr to be deleted */ SMC_LGR_SINGLE, /* 1 active RNIC on each peer */ SMC_LGR_SYMMETRIC, /* 2 active RNICs on each peer */ SMC_LGR_ASYMMETRIC_PEER, /* local has 2, peer 1 active RNICs */ SMC_LGR_ASYMMETRIC_LOCAL, /* local has 1, peer 2 active RNICs */ }; enum smcr_buf_type { /* types of SMC-R sndbufs and RMBs */ SMCR_PHYS_CONT_BUFS = 0, SMCR_VIRT_CONT_BUFS = 1, SMCR_MIXED_BUFS = 2, }; enum smc_llc_flowtype { SMC_LLC_FLOW_NONE = 0, SMC_LLC_FLOW_ADD_LINK = 2, SMC_LLC_FLOW_DEL_LINK = 4, SMC_LLC_FLOW_REQ_ADD_LINK = 5, SMC_LLC_FLOW_RKEY = 6, }; struct smc_llc_qentry; struct smc_llc_flow { enum smc_llc_flowtype type; struct smc_llc_qentry *qentry; }; struct smc_link_group { struct list_head list; struct rb_root conns_all; /* connection tree */ rwlock_t conns_lock; /* protects conns_all */ unsigned int conns_num; /* current # of connections */ unsigned short vlan_id; /* vlan id of link group */ struct list_head sndbufs[SMC_RMBE_SIZES];/* tx buffers */ struct rw_semaphore sndbufs_lock; /* protects tx buffers */ struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */ struct rw_semaphore rmbs_lock; /* protects rx buffers */ u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */ struct delayed_work free_work; /* delayed freeing of an lgr */ struct work_struct terminate_work; /* abnormal lgr termination */ struct workqueue_struct *tx_wq; /* wq for conn. tx workers */ u8 sync_err : 1; /* lgr no longer fits to peer */ u8 terminating : 1;/* lgr is terminating */ u8 freeing : 1; /* lgr is being freed */ refcount_t refcnt; /* lgr reference count */ bool is_smcd; /* SMC-R or SMC-D */ u8 smc_version; u8 negotiated_eid[SMC_MAX_EID_LEN]; u8 peer_os; /* peer operating system */ u8 peer_smc_release; u8 peer_hostname[SMC_MAX_HOSTNAME_LEN]; union { struct { /* SMC-R */ enum smc_lgr_role role; /* client or server */ struct smc_link lnk[SMC_LINKS_PER_LGR_MAX]; /* smc link */ struct smc_wr_v2_buf *wr_rx_buf_v2; /* WR v2 recv payload buffer */ struct smc_wr_v2_buf *wr_tx_buf_v2; /* WR v2 send payload buffer */ char peer_systemid[SMC_SYSTEMID_LEN]; /* unique system_id of peer */ struct smc_rtoken rtokens[SMC_RMBS_PER_LGR_MAX] [SMC_LINKS_PER_LGR_MAX]; /* remote addr/key pairs */ DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX); /* used rtoken elements */ u8 next_link_id; enum smc_lgr_type type; enum smcr_buf_type buf_type; /* redundancy state */ u8 pnet_id[SMC_MAX_PNETID_LEN + 1]; /* pnet id of this lgr */ struct list_head llc_event_q; /* queue for llc events */ spinlock_t llc_event_q_lock; /* protects llc_event_q */ struct rw_semaphore llc_conf_mutex; /* protects lgr reconfig. 
*/ struct work_struct llc_add_link_work; struct work_struct llc_del_link_work; struct work_struct llc_event_work; /* llc event worker */ wait_queue_head_t llc_flow_waiter; /* w4 next llc event */ wait_queue_head_t llc_msg_waiter; /* w4 next llc msg */ struct smc_llc_flow llc_flow_lcl; /* llc local control field */ struct smc_llc_flow llc_flow_rmt; /* llc remote control field */ struct smc_llc_qentry *delayed_event; /* arrived when flow active */ spinlock_t llc_flow_lock; /* protects llc flow */ int llc_testlink_time; /* link keep alive time */ u32 llc_termination_rsn; /* rsn code for termination */ u8 nexthop_mac[ETH_ALEN]; u8 uses_gateway; __be32 saddr; /* net namespace */ struct net *net; u8 max_conns; /* max conn can be assigned to lgr */ u8 max_links; /* max links can be added in lgr */ }; struct { /* SMC-D */ struct smcd_gid peer_gid; /* Peer GID (remote) */ struct smcd_dev *smcd; /* ISM device for VLAN reg. */ u8 peer_shutdown : 1; /* peer triggered shutdownn */ }; }; }; struct smc_clc_msg_local; #define GID_LIST_SIZE 2 struct smc_gidlist { u8 len; u8 list[GID_LIST_SIZE][SMC_GID_SIZE]; }; struct smc_init_info_smcrv2 { /* Input fields */ __be32 saddr; struct sock *clc_sk; __be32 daddr; /* Output fields when saddr is set */ struct smc_ib_device *ib_dev_v2; u8 ib_port_v2; u8 ib_gid_v2[SMC_GID_SIZE]; /* Additional output fields when clc_sk and daddr is set as well */ u8 uses_gateway; u8 nexthop_mac[ETH_ALEN]; struct smc_gidlist gidlist; }; #define SMC_MAX_V2_ISM_DEVS SMCD_CLC_MAX_V2_GID_ENTRIES /* max # of proposed non-native ISM devices, * which can't exceed the max # of CHID-GID * entries in CLC proposal SMC-Dv2 extension. */ struct smc_init_info { u8 is_smcd; u8 smc_type_v1; u8 smc_type_v2; u8 release_nr; u8 max_conns; u8 max_links; u8 first_contact_peer; u8 first_contact_local; u16 feature_mask; unsigned short vlan_id; u32 rc; u8 negotiated_eid[SMC_MAX_EID_LEN]; /* SMC-R */ u8 smcr_version; u8 check_smcrv2; u8 peer_gid[SMC_GID_SIZE]; u8 peer_mac[ETH_ALEN]; u8 peer_systemid[SMC_SYSTEMID_LEN]; struct smc_ib_device *ib_dev; u8 ib_gid[SMC_GID_SIZE]; u8 ib_port; u32 ib_clcqpn; struct smc_init_info_smcrv2 smcrv2; /* SMC-D */ struct smcd_gid ism_peer_gid[SMC_MAX_V2_ISM_DEVS + 1]; struct smcd_dev *ism_dev[SMC_MAX_V2_ISM_DEVS + 1]; u16 ism_chid[SMC_MAX_V2_ISM_DEVS + 1]; u8 ism_offered_cnt; /* # of ISM devices offered */ u8 ism_selected; /* index of selected ISM dev*/ u8 smcd_version; }; /* Find the connection associated with the given alert token in the link group. * To use rbtrees we have to implement our own search core. * Requires @conns_lock * @token alert token to search for * @lgr link group to search in * Returns connection associated with token if found, NULL otherwise. */ static inline struct smc_connection *smc_lgr_find_conn( u32 token, struct smc_link_group *lgr) { struct smc_connection *res = NULL; struct rb_node *node; node = lgr->conns_all.rb_node; while (node) { struct smc_connection *cur = rb_entry(node, struct smc_connection, alert_node); if (cur->alert_token_local > token) { node = node->rb_left; } else { if (cur->alert_token_local < token) { node = node->rb_right; } else { res = cur; break; } } } return res; } static inline bool smc_conn_lgr_valid(struct smc_connection *conn) { return conn->lgr && conn->alert_token_local; } /* * Returns true if the specified link is usable. * * usable means the link is ready to receive RDMA messages, map memory * on the link, etc. 
This doesn't ensure we are able to send RDMA messages * on this link, if sending RDMA messages is needed, use smc_link_sendable() */ static inline bool smc_link_usable(struct smc_link *lnk) { if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE) return false; return true; } /* * Returns true if the specified link is ready to receive AND send RDMA * messages. * * For the client side in first contact, the underlying QP may still in * RESET or RTR when the link state is ACTIVATING, checks in smc_link_usable() * is not strong enough. For those places that need to send any CDC or LLC * messages, use smc_link_sendable(), otherwise, use smc_link_usable() instead */ static inline bool smc_link_sendable(struct smc_link *lnk) { return smc_link_usable(lnk) && lnk->qp_attr.cur_qp_state == IB_QPS_RTS; } static inline bool smc_link_active(struct smc_link *lnk) { return lnk->state == SMC_LNK_ACTIVE; } static inline void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw) { sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", be16_to_cpu(((__be16 *)gid_raw)[0]), be16_to_cpu(((__be16 *)gid_raw)[1]), be16_to_cpu(((__be16 *)gid_raw)[2]), be16_to_cpu(((__be16 *)gid_raw)[3]), be16_to_cpu(((__be16 *)gid_raw)[4]), be16_to_cpu(((__be16 *)gid_raw)[5]), be16_to_cpu(((__be16 *)gid_raw)[6]), be16_to_cpu(((__be16 *)gid_raw)[7])); } struct smc_pci_dev { __u32 pci_fid; __u16 pci_pchid; __u16 pci_vendor; __u16 pci_device; __u8 pci_id[SMC_PCI_ID_STR_LEN]; }; static inline void smc_set_pci_values(struct pci_dev *pci_dev, struct smc_pci_dev *smc_dev) { smc_dev->pci_vendor = pci_dev->vendor; smc_dev->pci_device = pci_dev->device; snprintf(smc_dev->pci_id, sizeof(smc_dev->pci_id), "%s", pci_name(pci_dev)); #if IS_ENABLED(CONFIG_S390) { /* Set s390 specific PCI information */ struct zpci_dev *zdev; zdev = to_zpci(pci_dev); smc_dev->pci_fid = zdev->fid; smc_dev->pci_pchid = zdev->pchid; } #endif } struct smc_sock; struct smc_clc_msg_accept_confirm; void smc_lgr_cleanup_early(struct smc_link_group *lgr); void smc_lgr_terminate_sched(struct smc_link_group *lgr); void smc_lgr_hold(struct smc_link_group *lgr); void smc_lgr_put(struct smc_link_group *lgr); void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport); void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport); void smc_smcd_terminate(struct smcd_dev *dev, struct smcd_gid *peer_gid, unsigned short vlan); void smc_smcd_terminate_all(struct smcd_dev *dev); void smc_smcr_terminate_all(struct smc_ib_device *smcibdev); int smc_buf_create(struct smc_sock *smc, bool is_smcd); int smc_uncompress_bufsize(u8 compressed); int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link, struct smc_clc_msg_accept_confirm *clc); int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey); int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey); void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new, __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey); void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id, __be64 nw_vaddr, __be32 nw_rkey); void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn); void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn); int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini); void smc_conn_free(struct smc_connection *conn); int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini); int smc_core_init(void); void smc_core_exit(void); int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk, u8 link_idx, struct 
smc_init_info *ini); void smcr_link_clear(struct smc_link *lnk, bool log); void smcr_link_hold(struct smc_link *lnk); void smcr_link_put(struct smc_link *lnk); void smc_switch_link_and_count(struct smc_connection *conn, struct smc_link *to_lnk); int smcr_buf_map_lgr(struct smc_link *lnk); int smcr_buf_reg_lgr(struct smc_link *lnk); void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type); void smcr_lgr_set_type_asym(struct smc_link_group *lgr, enum smc_lgr_type new_type, int asym_lnk_idx); int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *rmb_desc); struct smc_link *smc_switch_conns(struct smc_link_group *lgr, struct smc_link *from_lnk, bool is_dev_err); void smcr_link_down_cond(struct smc_link *lnk); void smcr_link_down_cond_sched(struct smc_link *lnk); int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb); int smcr_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb); int smcr_nl_get_link(struct sk_buff *skb, struct netlink_callback *cb); int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb); static inline struct smc_link_group *smc_get_lgr(struct smc_link *link) { return link->lgr; } #endif
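/*
 * Illustrative sketch (not part of this header): using smc_lgr_find_conn()
 * as documented above.  The helper name example_lookup_conn() is invented;
 * taking conns_lock around the rbtree walk is the locking rule stated in
 * the comment on smc_lgr_find_conn().
 */
static inline struct smc_connection *example_lookup_conn(struct smc_link_group *lgr,
							 u32 token)
{
	struct smc_connection *conn;

	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(token, lgr);
	read_unlock_bh(&lgr->conns_lock);

	return conn;	/* may already be stale once the lock is dropped */
}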
21 3421 3440 10 3436 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 | // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/export.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/bitops.h> #include <asm/word-at-a-time.h> /* * Do a strnlen, return length of string *with* final '\0'. * 'count' is the user-supplied count, while 'max' is the * address space maximum. * * Return 0 for exceptions (which includes hitting the address * space maximum), or 'count+1' if hitting the user-supplied * maximum count. * * NOTE! We can sometimes overshoot the user-supplied maximum * if it fits in a aligned 'long'. The caller needs to check * the return value against "> max". */ static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long align, res = 0; unsigned long c; /* * Do everything aligned. But that means that we * need to also expand the maximum.. */ align = (sizeof(unsigned long) - 1) & (unsigned long)src; src -= align; max += align; unsafe_get_user(c, (unsigned long __user *)src, efault); c |= aligned_byte_mask(align); for (;;) { unsigned long data; if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); return res + find_zero(data) + 1 - align; } res += sizeof(unsigned long); /* We already handled 'unsigned long' bytes. Did we do it all ? */ if (unlikely(max <= sizeof(unsigned long))) break; max -= sizeof(unsigned long); unsafe_get_user(c, (unsigned long __user *)(src+res), efault); } res -= align; /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, return the marker for "too long". */ if (res >= count) return count+1; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's 0. */ efault: return 0; } /** * strnlen_user: - Get the size of a user string INCLUDING final NUL. * @str: The string to measure. * @count: Maximum count (including NUL character) * * Context: User context only. This function may sleep if pagefaults are * enabled. * * Get the size of a NUL-terminated string in user space. * * Returns the size of the string INCLUDING the terminating NUL. * If the string is too long, returns a number larger than @count. User * has to check the return value against "> count". * On exception (or invalid count), returns 0. * * NOTE! You should basically never use this function. There is * almost never any valid case for using the length of a user space * string, since the string can be changed at any time by other * threads. Use "strncpy_from_user()" instead to get a stable copy * of the string. 
*/ long strnlen_user(const char __user *str, long count) { unsigned long max_addr, src_addr; if (unlikely(count <= 0)) return 0; max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(str); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; long retval; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; if (user_read_access_begin(str, max)) { retval = do_strnlen_user(str, count, max); user_read_access_end(); return retval; } } return 0; } EXPORT_SYMBOL(strnlen_user); |
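/*
 * Illustrative sketch only - not part of lib/strnlen_user.c.  It spells out
 * the check pattern the kernel-doc above describes: treat 0 as a fault and
 * anything larger than the caller's own limit as "string too long".  The
 * function name, the limit of 64 and the error codes are invented for the
 * example.
 */
static int example_check_user_name(const char __user *uname)
{
	long len = strnlen_user(uname, 64);	/* length includes the final NUL */

	if (len == 0)
		return -EFAULT;			/* faulted before a NUL was found */
	if (len > 64)
		return -ENAMETOOLONG;		/* no NUL within the first 64 bytes */
	/*
	 * As the comment above recommends, a stable copy should normally be
	 * taken with strncpy_from_user() rather than relying on this length.
	 */
	return 0;
}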
// SPDX-License-Identifier: GPL-2.0-only /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> */ #include <linux/types.h> #include <linux/ip.h> #include <linux/netfilter.h> #include <linux/module.h> #include <linux/rcupdate.h> #include <linux/skbuff.h> #include <net/netns/generic.h> #include <net/route.h> #include <net/ip.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter_ipv4.h> #include <net/netfilter/ipv4/nf_defrag_ipv4.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <net/netfilter/nf_conntrack.h> #endif #include <net/netfilter/nf_conntrack_zones.h> static DEFINE_MUTEX(defrag4_mutex); static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb, u_int32_t user) { int err; local_bh_disable(); err = ip_defrag(net, skb, user); local_bh_enable(); if (!err) skb->ignore_df = 1; return err; } static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum, struct sk_buff *skb) { u16 zone_id = NF_CT_DEFAULT_ZONE_ID; #if IS_ENABLED(CONFIG_NF_CONNTRACK) if (skb_nfct(skb)) { enum ip_conntrack_info ctinfo; const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo)); } #endif if (nf_bridge_in_prerouting(skb)) return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id; if (hooknum == NF_INET_PRE_ROUTING) return IP_DEFRAG_CONNTRACK_IN + zone_id; else return IP_DEFRAG_CONNTRACK_OUT + zone_id; } static unsigned int ipv4_conntrack_defrag(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct sock *sk = skb->sk; if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) && inet_test_bit(NODEFRAG, sk)) return NF_ACCEPT; #if IS_ENABLED(CONFIG_NF_CONNTRACK) #if !IS_ENABLED(CONFIG_NF_NAT) /* Previously seen (loopback)? Ignore. Do this before fragment check. */ if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb))) return NF_ACCEPT; #endif if (skb->_nfct == IP_CT_UNTRACKED) return NF_ACCEPT; #endif /* Gather fragments.
*/ if (ip_is_fragment(ip_hdr(skb))) { enum ip_defrag_users user = nf_ct_defrag_user(state->hook, skb); if (nf_ct_ipv4_gather_frags(state->net, skb, user)) return NF_STOLEN; } return NF_ACCEPT; } static const struct nf_hook_ops ipv4_defrag_ops[] = { { .hook = ipv4_conntrack_defrag, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_CONNTRACK_DEFRAG, }, { .hook = ipv4_conntrack_defrag, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_CONNTRACK_DEFRAG, }, }; static void __net_exit defrag4_net_exit(struct net *net) { if (net->nf.defrag_ipv4_users) { nf_unregister_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); net->nf.defrag_ipv4_users = 0; } } static const struct nf_defrag_hook defrag_hook = { .owner = THIS_MODULE, .enable = nf_defrag_ipv4_enable, .disable = nf_defrag_ipv4_disable, }; static struct pernet_operations defrag4_net_ops = { .exit = defrag4_net_exit, }; static int __init nf_defrag_init(void) { int err; err = register_pernet_subsys(&defrag4_net_ops); if (err) return err; rcu_assign_pointer(nf_defrag_v4_hook, &defrag_hook); return err; } static void __exit nf_defrag_fini(void) { rcu_assign_pointer(nf_defrag_v4_hook, NULL); unregister_pernet_subsys(&defrag4_net_ops); } int nf_defrag_ipv4_enable(struct net *net) { int err = 0; mutex_lock(&defrag4_mutex); if (net->nf.defrag_ipv4_users == UINT_MAX) { err = -EOVERFLOW; goto out_unlock; } if (net->nf.defrag_ipv4_users) { net->nf.defrag_ipv4_users++; goto out_unlock; } err = nf_register_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); if (err == 0) net->nf.defrag_ipv4_users = 1; out_unlock: mutex_unlock(&defrag4_mutex); return err; } EXPORT_SYMBOL_GPL(nf_defrag_ipv4_enable); void nf_defrag_ipv4_disable(struct net *net) { mutex_lock(&defrag4_mutex); if (net->nf.defrag_ipv4_users) { net->nf.defrag_ipv4_users--; if (net->nf.defrag_ipv4_users == 0) nf_unregister_net_hooks(net, ipv4_defrag_ops, ARRAY_SIZE(ipv4_defrag_ops)); } mutex_unlock(&defrag4_mutex); } EXPORT_SYMBOL_GPL(nf_defrag_ipv4_disable); module_init(nf_defrag_init); module_exit(nf_defrag_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IPv4 defragmentation support"); |
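/*
 * Illustrative sketch only - not part of this file.  It shows how another
 * netfilter module would take and release a per-netns reference on the IPv4
 * defrag hooks through the exported nf_defrag_ipv4_enable()/
 * nf_defrag_ipv4_disable() helpers above.  The pernet_operations structure
 * and function names below are invented for the example.
 */
static int __net_init example_defrag_user_init(struct net *net)
{
	/* registers the PRE_ROUTING/LOCAL_OUT defrag hooks on first use */
	return nf_defrag_ipv4_enable(net);
}

static void __net_exit example_defrag_user_exit(struct net *net)
{
	/* drops the reference; the hooks go away when the count hits zero */
	nf_defrag_ipv4_disable(net);
}

static struct pernet_operations example_defrag_user_net_ops = {
	.init = example_defrag_user_init,
	.exit = example_defrag_user_exit,
};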
// SPDX-License-Identifier: GPL-2.0-only #include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mman.h> #include <linux/hugetlb.h> #include <linux/vmalloc.h> #include <linux/userfaultfd_k.h> #include <linux/elf.h> #include <linux/elf-randomize.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/processor.h> #include <linux/sizes.h> #include <linux/compat.h> #include <linux/uaccess.h> #include "internal.h" #include "swap.h" /** * kfree_const - conditionally free memory * @x: pointer to the memory * * Function calls kfree only if @x is not in .rodata section. */ void kfree_const(const void *x) { if (!is_kernel_rodata((unsigned long)x)) kfree(x); } EXPORT_SYMBOL(kfree_const); /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s or %NULL in case of error */ noinline char *kstrdup(const char *s, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strlen(s) + 1; buf = kmalloc_track_caller(len, gfp); if (buf) memcpy(buf, s, len); return buf; } EXPORT_SYMBOL(kstrdup); /** * kstrdup_const - conditionally duplicate an existing const string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Strings allocated by kstrdup_const should be freed by kfree_const and * must not be passed to krealloc(). * * Return: source string if it is in .rodata section otherwise * fallback to kstrdup.
*/ const char *kstrdup_const(const char *s, gfp_t gfp) { if (is_kernel_rodata((unsigned long)s)) return s; return kstrdup(s, gfp); } EXPORT_SYMBOL(kstrdup_const); /** * kstrndup - allocate space for and copy an existing string * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Note: Use kmemdup_nul() instead if the size is known exactly. * * Return: newly allocated copy of @s or %NULL in case of error */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strnlen(s, max); buf = kmalloc_track_caller(len+1, gfp); if (buf) { memcpy(buf, s, len); buf[len] = '\0'; } return buf; } EXPORT_SYMBOL(kstrndup); /** * kmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ void *kmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kmalloc_track_caller(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kmemdup); /** * kvmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use * * Return: newly allocated copy of @src or %NULL in case of error, * result may be not physically contiguous. Use kvfree() to free. */ void *kvmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kvmalloc(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kvmemdup); /** * kmemdup_nul - Create a NUL-terminated string from unterminated data * @s: The data to stringify * @len: The size of the data * @gfp: the GFP mask used in the kmalloc() call when allocating memory * * Return: newly allocated copy of @s with NUL-termination or %NULL in * case of error */ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) { char *buf; if (!s) return NULL; buf = kmalloc_track_caller(len + 1, gfp); if (buf) { memcpy(buf, s, len); buf[len] = '\0'; } return buf; } EXPORT_SYMBOL(kmemdup_nul); /** * memdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). */ void *memdup_user(const void __user *src, size_t len) { void *p; p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(memdup_user); /** * vmemdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. Result may be not * physically contiguous. Use kvfree() to free. */ void *vmemdup_user(const void __user *src, size_t len) { void *p; p = kvmalloc(len, GFP_USER); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kvfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(vmemdup_user); /** * strndup_user - duplicate an existing string from user space * @s: The string to duplicate * @n: Maximum number of bytes to copy, including the trailing NUL. 
* * Return: newly allocated copy of @s or an ERR_PTR() in case of error */ char *strndup_user(const char __user *s, long n) { char *p; long length; length = strnlen_user(s, n); if (!length) return ERR_PTR(-EFAULT); if (length > n) return ERR_PTR(-EINVAL); p = memdup_user(s, length); if (IS_ERR(p)) return p; p[length - 1] = '\0'; return p; } EXPORT_SYMBOL(strndup_user); /** * memdup_user_nul - duplicate memory region from user space and NUL-terminate * * @src: source address in user space * @len: number of bytes to copy * * Return: an ERR_PTR() on failure. */ void *memdup_user_nul(const void __user *src, size_t len) { char *p; /* * Always use GFP_KERNEL, since copy_from_user() can sleep and * cause pagefault, which makes it pointless to use GFP_NOFS * or GFP_ATOMIC. */ p = kmalloc_track_caller(len + 1, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } p[len] = '\0'; return p; } EXPORT_SYMBOL(memdup_user_nul); /* Check if the vma is being used as a stack by this task */ int vma_is_stack_for_current(struct vm_area_struct *vma) { struct task_struct * __maybe_unused t = current; return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } /* * Change backing file, only valid to use during initial VMA setup. */ void vma_set_file(struct vm_area_struct *vma, struct file *file) { /* Changing an anonymous vma with this is illegal */ get_file(file); swap(vma->vm_file, file); fput(file); } EXPORT_SYMBOL(vma_set_file); #ifndef STACK_RND_MASK #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ #endif unsigned long randomize_stack_top(unsigned long stack_top) { unsigned long random_variable = 0; if (current->flags & PF_RANDOMIZE) { random_variable = get_random_long(); random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP return PAGE_ALIGN(stack_top) + random_variable; #else return PAGE_ALIGN(stack_top) - random_variable; #endif } /** * randomize_page - Generate a random, page aligned address * @start: The smallest acceptable address the caller will take. * @range: The size of the area, starting at @start, within which the * random address must fall. * * If @start + @range would overflow, @range is capped. * * NOTE: Historical use of randomize_range, which this replaces, presumed that * @start was already page aligned. We now align it regardless. * * Return: A page aligned address within [start, start + range). On error, * @start is returned. */ unsigned long randomize_page(unsigned long start, unsigned long range) { if (!PAGE_ALIGNED(start)) { range -= PAGE_ALIGN(start) - start; start = PAGE_ALIGN(start); } if (start > ULONG_MAX - range) range = ULONG_MAX - start; range >>= PAGE_SHIFT; if (range == 0) return start; return start + (get_random_long() % range << PAGE_SHIFT); } #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT unsigned long __weak arch_randomize_brk(struct mm_struct *mm) { /* Is the current task 32bit ? 
*/ if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task()) return randomize_page(mm->brk, SZ_32M); return randomize_page(mm->brk, SZ_1G); } unsigned long arch_mmap_rnd(void) { unsigned long rnd; #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS if (is_compat_task()) rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1); else #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1); return rnd << PAGE_SHIFT; } static int mmap_is_legacy(struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) return 1; /* On parisc the stack always grows up - so a unlimited stack should * not be an indicator to use the legacy memory layout. */ if (rlim_stack->rlim_cur == RLIM_INFINITY && !IS_ENABLED(CONFIG_STACK_GROWSUP)) return 1; return sysctl_legacy_va_layout; } /* * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ #define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP / 6 * 5) static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { #ifdef CONFIG_STACK_GROWSUP /* * For an upwards growing stack the calculation is much simpler. * Memory for the maximum stack size is reserved at the top of the * task. mmap_base starts directly below the stack and grows * downwards. */ return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd); #else unsigned long gap = rlim_stack->rlim_cur; unsigned long pad = stack_guard_gap; /* Account for stack randomization if necessary */ if (current->flags & PF_RANDOMIZE) pad += (STACK_RND_MASK << PAGE_SHIFT); /* Values close to RLIM_INFINITY can overflow. */ if (gap + pad > gap) gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; else if (gap > MAX_GAP) gap = MAX_GAP; return PAGE_ALIGN(STACK_TOP - gap - rnd); #endif } void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { unsigned long random_factor = 0UL; if (current->flags & PF_RANDOMIZE) random_factor = arch_mmap_rnd(); if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; } #endif /** * __account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * @task: task used to check RLIMIT_MEMLOCK * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped * * Assumes @task and @mm are valid (i.e. at least one reference on each), and * that mmap_lock is held as writer. * * Return: * * 0 on success * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. 
*/ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct task_struct *task, bool bypass_rlim) { unsigned long locked_vm, limit; int ret = 0; mmap_assert_write_locked(mm); locked_vm = mm->locked_vm; if (inc) { if (!bypass_rlim) { limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (locked_vm + pages > limit) ret = -ENOMEM; } if (!ret) mm->locked_vm = locked_vm + pages; } else { WARN_ON_ONCE(pages > locked_vm); mm->locked_vm = locked_vm - pages; } pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid, (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT, locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK), ret ? " - exceeded" : ""); return ret; } EXPORT_SYMBOL_GPL(__account_locked_vm); /** * account_locked_vm - account locked pages to an mm's locked_vm * @mm: mm to account against, may be NULL * @pages: number of pages to account * @inc: %true if @pages should be considered positive, %false if not * * Assumes a non-NULL @mm is valid (i.e. at least one reference on it). * * Return: * * 0 on success, or if mm is NULL * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded. */ int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc) { int ret; if (pages == 0 || !mm) return 0; mmap_write_lock(mm); ret = __account_locked_vm(mm, pages, inc, current, capable(CAP_IPC_LOCK)); mmap_write_unlock(mm); return ret; } EXPORT_SYMBOL_GPL(account_locked_vm); unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff) { unsigned long ret; struct mm_struct *mm = current->mm; unsigned long populate; LIST_HEAD(uf); ret = security_mmap_file(file, prot, flag); if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate, &uf); mmap_write_unlock(mm); userfaultfd_unmap_complete(mm, &uf); if (populate) mm_populate(ret, populate); } return ret; } unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) { if (unlikely(offset + PAGE_ALIGN(len) < offset)) return -EINVAL; if (unlikely(offset_in_page(offset))) return -EINVAL; return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); } EXPORT_SYMBOL(vm_mmap); /** * kvmalloc_node - attempt to allocate physically contiguous memory, but upon * failure, fall back to non-contiguous (vmalloc) allocation. * @size: size of the request. * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL. * @node: numa node to allocate from * * Uses kmalloc to get the memory but if the allocation fails then falls back * to the vmalloc allocator. Use kvfree for freeing the memory. * * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier. * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is * preferable to the vmalloc fallback, due to visible performance drawbacks. * * Return: pointer to the allocated memory of %NULL in case of failure */ void *kvmalloc_node(size_t size, gfp_t flags, int node) { gfp_t kmalloc_flags = flags; void *ret; /* * We want to attempt a large physically contiguous block first because * it is less likely to fragment multiple larger blocks and therefore * contribute to a long term fragmentation less than vmalloc fallback. * However make sure that larger requests are not too disruptive - no * OOM killer and no allocation failure warnings as we have a fallback. 
*/ if (size > PAGE_SIZE) { kmalloc_flags |= __GFP_NOWARN; if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL)) kmalloc_flags |= __GFP_NORETRY; /* nofail semantic is implemented by the vmalloc fallback */ kmalloc_flags &= ~__GFP_NOFAIL; } ret = kmalloc_node(size, kmalloc_flags, node); /* * It doesn't really make sense to fallback to vmalloc for sub page * requests */ if (ret || size <= PAGE_SIZE) return ret; /* non-sleeping allocations are not supported by vmalloc */ if (!gfpflags_allow_blocking(flags)) return NULL; /* Don't even allow crazy sizes */ if (unlikely(size > INT_MAX)) { WARN_ON_ONCE(!(flags & __GFP_NOWARN)); return NULL; } /* * kvmalloc() can always use VM_ALLOW_HUGE_VMAP, * since the callers already cannot assume anything * about the resulting pointer, and cannot play * protection games. */ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node, __builtin_return_address(0)); } EXPORT_SYMBOL(kvmalloc_node); /** * kvfree() - Free memory. * @addr: Pointer to allocated memory. * * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc(). * It is slightly more efficient to use kfree() or vfree() if you are certain * that you know which one to use. * * Context: Either preemptible task context or not-NMI interrupt. */ void kvfree(const void *addr) { if (is_vmalloc_addr(addr)) vfree(addr); else kfree(addr); } EXPORT_SYMBOL(kvfree); /** * kvfree_sensitive - Free a data object containing sensitive information. * @addr: address of the data object to be freed. * @len: length of the data object. * * Use the special memzero_explicit() function to clear the content of a * kvmalloc'ed object containing sensitive data to make sure that the * compiler won't optimize out the data clearing. */ void kvfree_sensitive(const void *addr, size_t len) { if (likely(!ZERO_OR_NULL_PTR(addr))) { memzero_explicit((void *)addr, len); kvfree(addr); } } EXPORT_SYMBOL(kvfree_sensitive); void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) { void *newp; if (oldsize >= newsize) return (void *)p; newp = kvmalloc(newsize, flags); if (!newp) return NULL; memcpy(newp, p, oldsize); kvfree(p); return newp; } EXPORT_SYMBOL(kvrealloc); /** * __vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; return __vmalloc(bytes, flags); } EXPORT_SYMBOL(__vmalloc_array); /** * vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ void *vmalloc_array(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL); } EXPORT_SYMBOL(vmalloc_array); /** * __vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ void *__vcalloc(size_t n, size_t size, gfp_t flags) { return __vmalloc_array(n, size, flags | __GFP_ZERO); } EXPORT_SYMBOL(__vcalloc); /** * vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. 
*/ void *vcalloc(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO); } EXPORT_SYMBOL(vcalloc); struct anon_vma *folio_anon_vma(struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) return NULL; return (void *)(mapping - PAGE_MAPPING_ANON); } /** * folio_mapping - Find the mapping where this folio is stored. * @folio: The folio. * * For folios which are in the page cache, return the mapping that this * page belongs to. Folios in the swap cache return the swap mapping * this page is stored in (which is different from the mapping for the * swap file or swap device where the data is stored). * * You can call this for folios which aren't in the swap cache or page * cache and it will return NULL. */ struct address_space *folio_mapping(struct folio *folio) { struct address_space *mapping; /* This happens if someone calls flush_dcache_page on slab page */ if (unlikely(folio_test_slab(folio))) return NULL; if (unlikely(folio_test_swapcache(folio))) return swap_address_space(folio->swap); mapping = folio->mapping; if ((unsigned long)mapping & PAGE_MAPPING_FLAGS) return NULL; return mapping; } EXPORT_SYMBOL(folio_mapping); /** * folio_copy - Copy the contents of one folio to another. * @dst: Folio to copy to. * @src: Folio to copy from. * * The bytes in the folio represented by @src are copied to @dst. * Assumes the caller has validated that @dst is at least as large as @src. * Can be called in atomic context for order-0 folios, but if the folio is * larger, it may sleep. */ void folio_copy(struct folio *dst, struct folio *src) { long i = 0; long nr = folio_nr_pages(src); for (;;) { copy_highpage(folio_page(dst, i), folio_page(src, i)); if (++i == nr) break; cond_resched(); } } EXPORT_SYMBOL(folio_copy); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; unsigned long sysctl_overcommit_kbytes __read_mostly; int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */ unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */ int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_kbytes = 0; return ret; } static void sync_overcommit_as(struct work_struct *dummy) { percpu_counter_sync(&vm_committed_as); } int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; /* * The deviation of sync_overcommit_as could be big with loose policy * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply * with the strict "NEVER", and to avoid possible race condition (even * though user usually won't too frequently do the switching to policy * OVERCOMMIT_NEVER), the switch is done in the following order: * 1. changing the batch * 2. sync percpu count on each CPU * 3. 
switch the policy */ if (write) { t = *table; t.data = &new_policy; ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); if (ret || new_policy == -1) return ret; mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } return ret; } int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); if (ret == 0 && write) sysctl_overcommit_ratio = 0; return ret; } /* * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used */ unsigned long vm_commit_limit(void) { unsigned long allowed; if (sysctl_overcommit_kbytes) allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10); else allowed = ((totalram_pages() - hugetlb_total_pages()) * sysctl_overcommit_ratio / 100); allowed += total_swap_pages; return allowed; } /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. */ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; /* * The global memory commitment made in the system can be a metric * that can be used to drive ballooning decisions when Linux is hosted * as a guest. On Hyper-V, the host implements a policy engine for dynamically * balancing memory across competing virtual machines that are hosted. * Several metrics drive this policy engine including the guest reported * memory commitment. * * The time cost of this is very low for small platforms, and for big * platform like a 2S/36C/72T Skylake server, in worst case where * vm_committed_as's spinlock is under severe contention, the time cost * could be about 30~40 microseconds. */ unsigned long vm_memory_committed(void) { return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); /* * Check that a process has enough memory to allocate a new virtual * mapping. 0 means there is enough memory for the allocation to * succeed and -ENOMEM implies there is not. * * We currently support three overcommit policies, which are set via the * vm.overcommit_memory sysctl. See Documentation/mm/overcommit-accounting.rst * * Strict overcommit modes added 2002 Feb 26 by Alan Cox. * Additional code 2002 Jul 20 by Robert Love. * * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. * * Note this is a helper function intended to be used by LSMs which * wish to use this logic. 
*/ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) return 0; if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; } allowed = vm_commit_limit(); /* * Reserve some for root */ if (!cap_sys_admin) allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10); /* * Don't let a single process grow so big a user can't recover */ if (mm) { long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10); allowed -= min_t(long, mm->total_vm / 32, reserve); } if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n", __func__, current->pid, current->comm); vm_unacct_memory(pages); return -ENOMEM; } /** * get_cmdline() - copy the cmdline value to a buffer. * @task: the task whose cmdline value to copy. * @buffer: the buffer to copy to. * @buflen: the length of the buffer. Larger cmdline values are truncated * to this length. * * Return: the size of the cmdline field copied. Note that the copy does * not guarantee an ending NULL byte. */ int get_cmdline(struct task_struct *task, char *buffer, int buflen) { int res = 0; unsigned int len; struct mm_struct *mm = get_task_mm(task); unsigned long arg_start, arg_end, env_start, env_end; if (!mm) goto out; if (!mm->arg_end) goto out_mm; /* Shh! No looking before we're done */ spin_lock(&mm->arg_lock); arg_start = mm->arg_start; arg_end = mm->arg_end; env_start = mm->env_start; env_end = mm->env_end; spin_unlock(&mm->arg_lock); len = arg_end - arg_start; if (len > buflen) len = buflen; res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE); /* * If the nul at the end of args has been overwritten, then * assume application is using setproctitle(3). */ if (res > 0 && buffer[res-1] != '\0' && len < buflen) { len = strnlen(buffer, res); if (len < res) { res = len; } else { len = env_end - env_start; if (len > buflen - res) len = buflen - res; res += access_process_vm(task, env_start, buffer+res, len, FOLL_FORCE); res = strnlen(buffer, res); } } out_mm: mmput(mm); out: return res; } int __weak memcmp_pages(struct page *page1, struct page *page2) { char *addr1, *addr2; int ret; addr1 = kmap_local_page(page1); addr2 = kmap_local_page(page2); ret = memcmp(addr1, addr2, PAGE_SIZE); kunmap_local(addr2); kunmap_local(addr1); return ret; } #ifdef CONFIG_PRINTK /** * mem_dump_obj - Print available provenance information * @object: object for which to find provenance information. * * This function uses pr_cont(), so that the caller is expected to have * printed out whatever preamble is appropriate. The provenance information * depends on the type of object and on how much debugging is enabled. * For example, for a slab-cache object, the slab name is printed, and, * if available, the return address and stack trace from the allocation * and last free path of that object. 
*/ void mem_dump_obj(void *object) { const char *type; if (kmem_dump_obj(object)) return; if (vmalloc_dump_obj(object)) return; if (is_vmalloc_addr(object)) type = "vmalloc memory"; else if (virt_addr_valid(object)) type = "non-slab/vmalloc memory"; else if (object == NULL) type = "NULL pointer"; else if (object == ZERO_SIZE_PTR) type = "zero-size pointer"; else type = "non-paged memory"; pr_cont(" %s\n", type); } EXPORT_SYMBOL_GPL(mem_dump_obj); #endif /* * A driver might set a page logically offline -- PageOffline() -- and * turn the page inaccessible in the hypervisor; after that, access to page * content can be fatal. * * Some special PFN walkers -- i.e., /proc/kcore -- read content of random * pages after checking PageOffline(); however, these PFN walkers can race * with drivers that set PageOffline(). * * page_offline_freeze()/page_offline_thaw() allows for a subsystem to * synchronize with such drivers, achieving that a page cannot be set * PageOffline() while frozen. * * page_offline_begin()/page_offline_end() is used by drivers that care about * such races when setting a page PageOffline(). */ static DECLARE_RWSEM(page_offline_rwsem); void page_offline_freeze(void) { down_read(&page_offline_rwsem); } void page_offline_thaw(void) { up_read(&page_offline_rwsem); } void page_offline_begin(void) { down_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_begin); void page_offline_end(void) { up_write(&page_offline_rwsem); } EXPORT_SYMBOL(page_offline_end); #ifndef flush_dcache_folio void flush_dcache_folio(struct folio *folio) { long i, nr = folio_nr_pages(folio); for (i = 0; i < nr; i++) flush_dcache_page(folio_page(folio, i)); } EXPORT_SYMBOL(flush_dcache_folio); #endif |
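/*
 * Illustrative sketch only - not part of mm/util.c.  It shows the typical
 * use of the kvmalloc()/kvfree() family documented above: attempt a
 * physically contiguous allocation first, transparently fall back to
 * vmalloc for large sizes, and always release with kvfree().  The structure
 * and function names are invented for the example.
 */
struct example_table {
	size_t nr;
	u64 *entries;
};

static struct example_table *example_table_alloc(size_t nr)
{
	struct example_table *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return NULL;

	/* may be kmalloc- or vmalloc-backed; only kvfree() may free it */
	t->entries = kvmalloc_array(nr, sizeof(*t->entries),
				    GFP_KERNEL | __GFP_ZERO);
	if (!t->entries) {
		kfree(t);
		return NULL;
	}
	t->nr = nr;
	return t;
}

static void example_table_free(struct example_table *t)
{
	kvfree(t->entries);
	kfree(t);
}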
/* SPDX-License-Identifier: GPL-2.0 */ /* Rewritten and vastly simplified by Rusty Russell for in-kernel * module loader: * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation */ #ifndef _LINUX_KALLSYMS_H #define _LINUX_KALLSYMS_H #include <linux/errno.h> #include <linux/buildid.h> #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/mm.h> #include <linux/module.h> #include <asm/sections.h> #define KSYM_NAME_LEN 512 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \ (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \ (BUILD_ID_SIZE_MAX * 2) + 1) struct cred; struct module; static inline int is_kernel_text(unsigned long addr) { if (__is_kernel_text(addr)) return 1; return in_gate_area_no_mm(addr); } static inline int is_kernel(unsigned long addr) { if (__is_kernel(addr)) return 1; return in_gate_area_no_mm(addr); } static inline int is_ksym_addr(unsigned long addr) { if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) return is_kernel(addr); return is_kernel_text(addr) || is_kernel_inittext(addr); } static inline void *dereference_symbol_descriptor(void *ptr) { #ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS struct module *mod; ptr = dereference_kernel_function_descriptor(ptr); if (is_ksym_addr((unsigned long)ptr)) return ptr; preempt_disable(); mod = __module_address((unsigned long)ptr); preempt_enable(); if (mod) ptr = dereference_module_function_descriptor(mod, ptr); #endif return ptr; } /* How and when do we show kallsyms values? */ extern bool kallsyms_show_value(const struct cred *cred); #ifdef CONFIG_KALLSYMS unsigned long kallsyms_sym_address(int idx); int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long), void *data); int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), const char *name, void *data); /* Lookup the address for a symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name); extern int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset); /* Lookup an address. modname is set to NULL if it's in the kernel. */ const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf); /* Look up a kernel symbol and return it in a text buffer.
*/ extern int sprint_symbol(char *buffer, unsigned long address); extern int sprint_symbol_build_id(char *buffer, unsigned long address); extern int sprint_symbol_no_offset(char *buffer, unsigned long address); extern int sprint_backtrace(char *buffer, unsigned long address); extern int sprint_backtrace_build_id(char *buffer, unsigned long address); int lookup_symbol_name(unsigned long addr, char *symname); #else /* !CONFIG_KALLSYMS */ static inline unsigned long kallsyms_lookup_name(const char *name) { return 0; } static inline int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { return 0; } static inline const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { return NULL; } static inline int sprint_symbol(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int sprint_symbol_build_id(char *buffer, unsigned long address) { *buffer = '\0'; return 0; } static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int sprint_backtrace(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr) { *buffer = '\0'; return 0; } static inline int lookup_symbol_name(unsigned long addr, char *symname) { return -ERANGE; } static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long), void *data) { return -EOPNOTSUPP; } static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), const char *name, void *data) { return -EOPNOTSUPP; } #endif /*CONFIG_KALLSYMS*/ static inline void print_ip_sym(const char *loglvl, unsigned long ip) { printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ |
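/*
 * Illustrative sketch only - not part of this header.  It resolves an
 * address to a human-readable symbol with sprint_symbol(), as declared
 * above.  With CONFIG_KALLSYMS disabled the stub above simply yields an
 * empty string, so callers need no #ifdef of their own.  The function name
 * and message are invented for the example.
 */
static inline void example_report_caller(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol(sym, addr);	/* "name+0x0/0x10 [module]" style output */
	printk(KERN_DEBUG "called from %s\n", sym);
}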
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2023 Western Digital Corporation or its affiliates. */ #ifndef BTRFS_RAID_STRIPE_TREE_H #define BTRFS_RAID_STRIPE_TREE_H #define BTRFS_RST_SUPP_BLOCK_GROUP_MASK (BTRFS_BLOCK_GROUP_DUP | \ BTRFS_BLOCK_GROUP_RAID1_MASK | \ BTRFS_BLOCK_GROUP_RAID0 | \ BTRFS_BLOCK_GROUP_RAID10) struct btrfs_io_context; struct btrfs_io_stripe; struct btrfs_ordered_extent; struct btrfs_trans_handle; int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length); int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info, u64 logical, u64 *length, u64 map_type, u32 stripe_index, struct btrfs_io_stripe *stripe); int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans, struct btrfs_ordered_extent *ordered_extent); static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info, u64 map_type) { u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK; u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK; if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) return false; if (type != BTRFS_BLOCK_GROUP_DATA) return false; if (profile & BTRFS_RST_SUPP_BLOCK_GROUP_MASK) return true; return false; } static inline int btrfs_num_raid_stripes(u32 item_size) { return (item_size - offsetof(struct btrfs_stripe_extent, strides)) / sizeof(struct btrfs_raid_stride); } #endif
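/*
 * Illustrative sketch only - not part of this header.  It shows the kind of
 * gate a write path could apply before recording a RAID stripe extent: only
 * data block groups on RST-capable profiles need the stripe tree updated,
 * which is exactly what btrfs_need_stripe_tree_update() above decides.  The
 * wrapper below and its arguments are invented for illustration; the real
 * call sites live in fs/btrfs.
 */
static inline int example_maybe_insert_raid_extent(struct btrfs_trans_handle *trans,
						   struct btrfs_fs_info *fs_info,
						   struct btrfs_ordered_extent *ordered,
						   u64 map_type)
{
	if (!btrfs_need_stripe_tree_update(fs_info, map_type))
		return 0;	/* nothing to record for this profile */

	return btrfs_insert_raid_extent(trans, ordered);
}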
// SPDX-License-Identifier: GPL-2.0-or-later /* * Spanning tree protocol; interface code * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> */ #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <net/switchdev.h> #include "br_private.h" #include "br_private_stp.h" /* Port id is composed of priority and port number. * NB: some bits of priority are dropped to * make room for more ports.
*/ static inline port_id br_make_port_id(__u8 priority, __u16 port_no) { return ((u16)priority << BR_PORT_BITS) | (port_no & ((1<<BR_PORT_BITS)-1)); } #define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS) /* called under bridge lock */ void br_init_port(struct net_bridge_port *p) { int err; p->port_id = br_make_port_id(p->priority, p->port_no); br_become_designated_port(p); br_set_state(p, BR_STATE_BLOCKING); p->topology_change_ack = 0; p->config_pending = 0; err = __set_ageing_time(p->dev, p->br->ageing_time); if (err) netdev_err(p->dev, "failed to offload ageing time\n"); } /* NO locks held */ void br_stp_enable_bridge(struct net_bridge *br) { struct net_bridge_port *p; spin_lock_bh(&br->lock); if (br->stp_enabled == BR_KERNEL_STP) mod_timer(&br->hello_timer, jiffies + br->hello_time); mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10); br_config_bpdu_generation(br); list_for_each_entry(p, &br->port_list, list) { if (netif_running(p->dev) && netif_oper_up(p->dev)) br_stp_enable_port(p); } spin_unlock_bh(&br->lock); } /* NO locks held */ void br_stp_disable_bridge(struct net_bridge *br) { struct net_bridge_port *p; spin_lock_bh(&br->lock); list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED) br_stp_disable_port(p); } __br_set_topology_change(br, 0); br->topology_change_detected = 0; spin_unlock_bh(&br->lock); del_timer_sync(&br->hello_timer); del_timer_sync(&br->topology_change_timer); del_timer_sync(&br->tcn_timer); cancel_delayed_work_sync(&br->gc_work); } /* called under bridge lock */ void br_stp_enable_port(struct net_bridge_port *p) { br_init_port(p); br_port_state_selection(p->br); br_ifinfo_notify(RTM_NEWLINK, NULL, p); } /* called under bridge lock */ void br_stp_disable_port(struct net_bridge_port *p) { struct net_bridge *br = p->br; int wasroot; wasroot = br_is_root_bridge(br); br_become_designated_port(p); br_set_state(p, BR_STATE_DISABLED); p->topology_change_ack = 0; p->config_pending = 0; br_ifinfo_notify(RTM_NEWLINK, NULL, p); del_timer(&p->message_age_timer); del_timer(&p->forward_delay_timer); del_timer(&p->hold_timer); if (!rcu_access_pointer(p->backup_port)) br_fdb_delete_by_port(br, p, 0, 0); br_multicast_disable_port(p); br_configuration_update(br); br_port_state_selection(br); if (br_is_root_bridge(br) && !wasroot) br_become_root_bridge(br); } static int br_stp_call_user(struct net_bridge *br, char *arg) { char *argv[] = { BR_STP_PROG, br->dev->name, arg, NULL }; char *envp[] = { NULL }; int rc; /* call userspace STP and report program errors */ rc = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); if (rc > 0) { if (rc & 0xff) br_debug(br, BR_STP_PROG " received signal %d\n", rc & 0x7f); else br_debug(br, BR_STP_PROG " exited with code %d\n", (rc >> 8) & 0xff); } return rc; } static void br_stp_start(struct net_bridge *br) { int err = -ENOENT; if (net_eq(dev_net(br->dev), &init_net)) err = br_stp_call_user(br, "start"); if (err && err != -ENOENT) br_err(br, "failed to start userspace STP (%d)\n", err); spin_lock_bh(&br->lock); if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY) __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); if (!err) { br->stp_enabled = BR_USER_STP; br_debug(br, "userspace STP started\n"); } else { br->stp_enabled = BR_KERNEL_STP; br_debug(br, "using kernel STP\n"); /* To start timers on any ports left in blocking */ if (br->dev->flags & IFF_UP) mod_timer(&br->hello_timer, jiffies + 
br->hello_time); br_port_state_selection(br); } spin_unlock_bh(&br->lock); } static void br_stp_stop(struct net_bridge *br) { int err; if (br->stp_enabled == BR_USER_STP) { err = br_stp_call_user(br, "stop"); if (err) br_err(br, "failed to stop userspace STP (%d)\n", err); /* To start timers on any ports left in blocking */ spin_lock_bh(&br->lock); br_port_state_selection(br); spin_unlock_bh(&br->lock); } br->stp_enabled = BR_NO_STP; } int br_stp_set_enabled(struct net_bridge *br, unsigned long val, struct netlink_ext_ack *extack) { ASSERT_RTNL(); if (br_mrp_enabled(br)) { NL_SET_ERR_MSG_MOD(extack, "STP can't be enabled if MRP is already enabled"); return -EINVAL; } if (val) { if (br->stp_enabled == BR_NO_STP) br_stp_start(br); } else { if (br->stp_enabled != BR_NO_STP) br_stp_stop(br); } return 0; } /* called under bridge lock */ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr) { /* should be aligned on 2 bytes for ether_addr_equal() */ unsigned short oldaddr_aligned[ETH_ALEN >> 1]; unsigned char *oldaddr = (unsigned char *)oldaddr_aligned; struct net_bridge_port *p; int wasroot; wasroot = br_is_root_bridge(br); br_fdb_change_mac_address(br, addr); memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN); memcpy(br->bridge_id.addr, addr, ETH_ALEN); eth_hw_addr_set(br->dev, addr); list_for_each_entry(p, &br->port_list, list) { if (ether_addr_equal(p->designated_bridge.addr, oldaddr)) memcpy(p->designated_bridge.addr, addr, ETH_ALEN); if (ether_addr_equal(p->designated_root.addr, oldaddr)) memcpy(p->designated_root.addr, addr, ETH_ALEN); } br_configuration_update(br); br_port_state_selection(br); if (br_is_root_bridge(br) && !wasroot) br_become_root_bridge(br); } /* should be aligned on 2 bytes for ether_addr_equal() */ static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; /* called under bridge lock */ bool br_stp_recalculate_bridge_id(struct net_bridge *br) { const unsigned char *br_mac_zero = (const unsigned char *)br_mac_zero_aligned; const unsigned char *addr = br_mac_zero; struct net_bridge_port *p; /* user has chosen a value so keep it */ if (br->dev->addr_assign_type == NET_ADDR_SET) return false; list_for_each_entry(p, &br->port_list, list) { if (addr == br_mac_zero || memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0) addr = p->dev->dev_addr; } if (ether_addr_equal(br->bridge_id.addr, addr)) return false; /* no change */ br_stp_change_bridge_id(br, addr); return true; } /* Acquires and releases bridge lock */ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio) { struct net_bridge_port *p; int wasroot; spin_lock_bh(&br->lock); wasroot = br_is_root_bridge(br); list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED && br_is_designated_port(p)) { p->designated_bridge.prio[0] = (newprio >> 8) & 0xFF; p->designated_bridge.prio[1] = newprio & 0xFF; } } br->bridge_id.prio[0] = (newprio >> 8) & 0xFF; br->bridge_id.prio[1] = newprio & 0xFF; br_configuration_update(br); br_port_state_selection(br); if (br_is_root_bridge(br) && !wasroot) br_become_root_bridge(br); spin_unlock_bh(&br->lock); } /* called under bridge lock */ int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio) { port_id new_port_id; if (newprio > BR_MAX_PORT_PRIORITY) return -ERANGE; new_port_id = br_make_port_id(newprio, p->port_no); if (br_is_designated_port(p)) p->designated_port = new_port_id; p->port_id = new_port_id; p->priority = newprio; if (!memcmp(&p->br->bridge_id, &p->designated_bridge, 8) && p->port_id < p->designated_port) 
{ br_become_designated_port(p); br_port_state_selection(p->br); } return 0; } /* called under bridge lock */ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost) { if (path_cost < BR_MIN_PATH_COST || path_cost > BR_MAX_PATH_COST) return -ERANGE; p->flags |= BR_ADMIN_COST; p->path_cost = path_cost; br_configuration_update(p->br); br_port_state_selection(p->br); return 0; } ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id) { return sprintf(buf, "%.2x%.2x.%.2x%.2x%.2x%.2x%.2x%.2x\n", id->prio[0], id->prio[1], id->addr[0], id->addr[1], id->addr[2], id->addr[3], id->addr[4], id->addr[5]); } |
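/*
 * Illustrative sketch only - not part of br_stp_if.c.  It is a worked
 * example of the port_id packing done by br_make_port_id() above, assuming
 * the usual BR_PORT_BITS value of 10 from br_private.h: the upper bits
 * carry the priority (hence the BR_MAX_PORT_PRIORITY check in
 * br_stp_set_port_priority()) and the low BR_PORT_BITS bits carry the port
 * number.  The helper name is invented for the example.
 */
static inline void example_decode_port_id(port_id id,
					  u8 *priority, u16 *port_no)
{
	*priority = id >> BR_PORT_BITS;			/* upper bits */
	*port_no  = id & ((1 << BR_PORT_BITS) - 1);	/* lower bits */
}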
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H

/*
 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
 * lockless readers (read-only retry loops), and no writer starvation.
 *
 * See Documentation/locking/seqlock.rst
 *
 * Copyrights:
 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
 */

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/seqlock_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

/*
 * The seqlock seqcount_t interface does not prescribe a precise sequence of
 * read begin/retry/end. For readers, typically there is a call to
 * read_seqcount_begin() and read_seqcount_retry(), however, there are more
 * esoteric cases which do not follow this pattern.
 *
 * As a consequence, we take the following best-effort approach for raw usage
 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
 * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
 * atomics; if there is a matching read_seqcount_retry() call, no following
 * memory operations are considered atomic. Usage of the seqlock_t interface
 * is not affected.
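 *
 * For reference, the typical read_seqcount_begin()/read_seqcount_retry()
 * pattern mentioned above looks like the following minimal sketch (the
 * foo_seq counter and the foo_a/foo_b data are hypothetical, purely for
 * illustration):
 *
 *        static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *        static u64 foo_a, foo_b;
 *
 *        static u64 foo_read_snapshot(void)
 *        {
 *                unsigned int seq;
 *                u64 a, b;
 *
 *                do {
 *                        seq = read_seqcount_begin(&foo_seq);
 *                        a = foo_a;
 *                        b = foo_b;
 *                } while (read_seqcount_retry(&foo_seq, seq));
 *
 *                return a + b;
 *        }
 *
 * The writer side must be serialized and non-preemptible, and must wrap
 * its updates in write_seqcount_begin()/write_seqcount_end().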
*/ #define KCSAN_SEQLOCK_REGION_MAX 1000 static inline void __seqcount_init(seqcount_t *s, const char *name, struct lock_class_key *key) { /* * Make sure we are not reinitializing a held lock: */ lockdep_init_map(&s->dep_map, name, key, 0); s->sequence = 0; } #ifdef CONFIG_DEBUG_LOCK_ALLOC # define SEQCOUNT_DEP_MAP_INIT(lockname) \ .dep_map = { .name = #lockname } /** * seqcount_init() - runtime initializer for seqcount_t * @s: Pointer to the seqcount_t instance */ # define seqcount_init(s) \ do { \ static struct lock_class_key __key; \ __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) { seqcount_t *l = (seqcount_t *)s; unsigned long flags; local_irq_save(flags); seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); seqcount_release(&l->dep_map, _RET_IP_); local_irq_restore(flags); } #else # define SEQCOUNT_DEP_MAP_INIT(lockname) # define seqcount_init(s) __seqcount_init(s, NULL, NULL) # define seqcount_lockdep_reader_access(x) #endif /** * SEQCNT_ZERO() - static initializer for seqcount_t * @name: Name of the seqcount_t instance */ #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } /* * Sequence counters with associated locks (seqcount_LOCKNAME_t) * * A sequence counter which associates the lock used for writer * serialization at initialization time. This enables lockdep to validate * that the write side critical section is properly serialized. * * For associated locks which do not implicitly disable preemption, * preemption protection is enforced in the write side function. * * Lockdep is never used in any for the raw write variants. * * See Documentation/locking/seqlock.rst */ /* * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated * @seqcount: The real sequence counter * @lock: Pointer to the associated lock * * A plain sequence counter with external writer synchronization by * LOCKNAME @lock. The lock is associated to the sequence counter in the * static initializer or init function. This enables lockdep to validate * that the write side critical section is properly serialized. 
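 *
 * For example (a minimal, purely illustrative sketch; struct foo and its
 * members are hypothetical), a spinlock-associated counter is set up as:
 *
 *        struct foo {
 *                spinlock_t              lock;
 *                seqcount_spinlock_t     seq;
 *                u64                     value;
 *        };
 *
 *        static void foo_init(struct foo *f)
 *        {
 *                spin_lock_init(&f->lock);
 *                seqcount_spinlock_init(&f->seq, &f->lock);
 *        }
 *
 * Writers must then hold f->lock around the write side critical section,
 * which lockdep can now verify.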
* * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex */ /* * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t * @s: Pointer to the seqcount_LOCKNAME_t instance * @lock: Pointer to the associated lock */ #define seqcount_LOCKNAME_init(s, _lock, lockname) \ do { \ seqcount_##lockname##_t *____s = (s); \ seqcount_init(&____s->seqcount); \ __SEQ_LOCK(____s->lock = (_lock)); \ } while (0) #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock) #define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock) #define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock) #define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex) /* * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t * * @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t * @locktype: LOCKNAME canonical C data type * @preemptible: preemptibility of above locktype * @lockbase: prefix for associated lock/unlock */ #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \ static __always_inline seqcount_t * \ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \ { \ return &s->seqcount; \ } \ \ static __always_inline const seqcount_t * \ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \ { \ return &s->seqcount; \ } \ \ static __always_inline unsigned \ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ { \ unsigned seq = READ_ONCE(s->seqcount.sequence); \ \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ return seq; \ \ if (preemptible && unlikely(seq & 1)) { \ __SEQ_LOCK(lockbase##_lock(s->lock)); \ __SEQ_LOCK(lockbase##_unlock(s->lock)); \ \ /* \ * Re-read the sequence counter since the (possibly \ * preempted) writer made progress. 
\ */ \ seq = READ_ONCE(s->seqcount.sequence); \ } \ \ return seq; \ } \ \ static __always_inline bool \ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \ { \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ return preemptible; \ \ /* PREEMPT_RT relies on the above LOCK+UNLOCK */ \ return false; \ } \ \ static __always_inline void \ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \ { \ __SEQ_LOCK(lockdep_assert_held(s->lock)); \ } /* * __seqprop() for seqcount_t */ static inline seqcount_t *__seqprop_ptr(seqcount_t *s) { return s; } static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s) { return s; } static inline unsigned __seqprop_sequence(const seqcount_t *s) { return READ_ONCE(s->sequence); } static inline bool __seqprop_preemptible(const seqcount_t *s) { return false; } static inline void __seqprop_assert(const seqcount_t *s) { lockdep_assert_preemption_disabled(); } #define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT) SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin) SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin) SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read) SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) #undef SEQCOUNT_LOCKNAME /* * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t * @name: Name of the seqcount_LOCKNAME_t instance * @lock: Pointer to the associated LOCKNAME */ #define SEQCOUNT_LOCKNAME_ZERO(seq_name, assoc_lock) { \ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ __SEQ_LOCK(.lock = (assoc_lock)) \ } #define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock) #define __seqprop_case(s, lockname, prop) \ seqcount_##lockname##_t: __seqprop_##lockname##_##prop #define __seqprop(s, prop) _Generic(*(s), \ seqcount_t: __seqprop_##prop, \ __seqprop_case((s), raw_spinlock, prop), \ __seqprop_case((s), spinlock, prop), \ __seqprop_case((s), rwlock, prop), \ __seqprop_case((s), mutex, prop)) #define seqprop_ptr(s) __seqprop(s, ptr)(s) #define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s) #define seqprop_sequence(s) __seqprop(s, sequence)(s) #define seqprop_preemptible(s) __seqprop(s, preemptible)(s) #define seqprop_assert(s) __seqprop(s, assert)(s) /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is * provided before actually loading any of the variables that are to be * protected in this critical section. * * Use carefully, only in critical code, and comment how the barrier is * provided. 
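 *
 * For instance (an illustrative sketch; the foo_* names are hypothetical),
 * the caller may issue the read barrier itself between sampling the
 * counter and loading the protected data, which is exactly what
 * raw_read_seqcount_begin() does::
 *
 *        seq = __read_seqcount_begin(&foo_seq);
 *        smp_rmb();      // pairs with the writer's smp_wmb()
 *        a = foo_a;
 *        b = foo_b;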
* * Return: count to be passed to read_seqcount_retry() */ #define __read_seqcount_begin(s) \ ({ \ unsigned __seq; \ \ while ((__seq = seqprop_sequence(s)) & 1) \ cpu_relax(); \ \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ __seq; \ }) /** * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount_begin(s) \ ({ \ unsigned _seq = __read_seqcount_begin(s); \ \ smp_rmb(); \ _seq; \ }) /** * read_seqcount_begin() - begin a seqcount_t read critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Return: count to be passed to read_seqcount_retry() */ #define read_seqcount_begin(s) \ ({ \ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \ raw_read_seqcount_begin(s); \ }) /** * raw_read_seqcount() - read the raw seqcount_t counter value * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * raw_read_seqcount opens a read critical section of the given * seqcount_t, without any lockdep checking, and without checking or * masking the sequence counter LSB. Calling code is responsible for * handling that. * * Return: count to be passed to read_seqcount_retry() */ #define raw_read_seqcount(s) \ ({ \ unsigned __seq = seqprop_sequence(s); \ \ smp_rmb(); \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ __seq; \ }) /** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * raw_seqcount_begin opens a read critical section of the given * seqcount_t. Unlike read_seqcount_begin(), this function will not wait * for the count to stabilize. If a writer is active when it begins, it * will fail the read_seqcount_retry() at the end of the read critical * section instead of stabilizing at the beginning of it. * * Use this only in special kernel hot paths where the read section is * small and has a high probability of success through other external * means. It will save a single branching instruction. * * Return: count to be passed to read_seqcount_retry() */ #define raw_seqcount_begin(s) \ ({ \ /* \ * If the counter is odd, let read_seqcount_retry() fail \ * by decrementing the counter. \ */ \ raw_read_seqcount(s) & ~1; \ }) /** * __read_seqcount_retry() - end a seqcount_t read section w/o barrier * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is * provided before actually loading any of the variables that are to be * protected in this critical section. * * Use carefully, only in critical code, and comment how the barrier is * provided. * * Return: true if a read section retry is required, else false */ #define __read_seqcount_retry(s, start) \ do___read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start) { kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); } /** * read_seqcount_retry() - end a seqcount_t read critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * @start: count, from read_seqcount_begin() * * read_seqcount_retry closes the read critical section of given * seqcount_t. 
If the critical section was invalid, it must be ignored * (and typically retried). * * Return: true if a read section retry is required, else false */ #define read_seqcount_retry(s, start) \ do_read_seqcount_retry(seqprop_const_ptr(s), start) static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start) { smp_rmb(); return do___read_seqcount_retry(s, start); } /** * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Context: check write_seqcount_begin() */ #define raw_write_seqcount_begin(s) \ do { \ if (seqprop_preemptible(s)) \ preempt_disable(); \ \ do_raw_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) static inline void do_raw_write_seqcount_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } /** * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Context: check write_seqcount_end() */ #define raw_write_seqcount_end(s) \ do { \ do_raw_write_seqcount_end(seqprop_ptr(s)); \ \ if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) static inline void do_raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; kcsan_nestable_atomic_end(); } /** * write_seqcount_begin_nested() - start a seqcount_t write section with * custom lockdep nesting level * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * @subclass: lockdep nesting level * * See Documentation/locking/lockdep-design.rst * Context: check write_seqcount_begin() */ #define write_seqcount_begin_nested(s, subclass) \ do { \ seqprop_assert(s); \ \ if (seqprop_preemptible(s)) \ preempt_disable(); \ \ do_write_seqcount_begin_nested(seqprop_ptr(s), subclass); \ } while (0) static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass) { seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); do_raw_write_seqcount_begin(s); } /** * write_seqcount_begin() - start a seqcount_t write side critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Context: sequence counter write side sections must be serialized and * non-preemptible. Preemption will be automatically disabled if and * only if the seqcount write serialization lock is associated, and * preemptible. If readers can be invoked from hardirq or softirq * context, interrupts or bottom halves must be respectively disabled. */ #define write_seqcount_begin(s) \ do { \ seqprop_assert(s); \ \ if (seqprop_preemptible(s)) \ preempt_disable(); \ \ do_write_seqcount_begin(seqprop_ptr(s)); \ } while (0) static inline void do_write_seqcount_begin(seqcount_t *s) { do_write_seqcount_begin_nested(s, 0); } /** * write_seqcount_end() - end a seqcount_t write side critical section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * Context: Preemption will be automatically re-enabled if and only if * the seqcount write serialization lock is associated, and preemptible. 
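 *
 * A complete write side section, for a hypothetical seqcount_spinlock_t
 * f->seq associated with spinlock f->lock, would look roughly like::
 *
 *        spin_lock(&f->lock);                    // serialize writers
 *        write_seqcount_begin(&f->seq);          // lockdep asserts f->lock is held
 *        f->value = new_value;                   // update the protected data
 *        write_seqcount_end(&f->seq);
 *        spin_unlock(&f->lock);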
*/ #define write_seqcount_end(s) \ do { \ do_write_seqcount_end(seqprop_ptr(s)); \ \ if (seqprop_preemptible(s)) \ preempt_enable(); \ } while (0) static inline void do_write_seqcount_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); do_raw_write_seqcount_end(s); } /** * raw_write_seqcount_barrier() - do a seqcount_t write barrier * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * This can be used to provide an ordering guarantee instead of the usual * consistency guarantee. It is one wmb cheaper, because it can collapse * the two back-to-back wmb()s. * * Note that writes surrounding the barrier should be declared atomic (e.g. * via WRITE_ONCE): a) to ensure the writes become visible to other threads * atomically, avoiding compiler optimizations; b) to document which writes are * meant to propagate to the reader critical section. This is necessary because * neither writes before nor after the barrier are enclosed in a seq-writer * critical section that would ensure readers are aware of ongoing writes:: * * seqcount_t seq; * bool X = true, Y = false; * * void read(void) * { * bool x, y; * * do { * int s = read_seqcount_begin(&seq); * * x = X; y = Y; * * } while (read_seqcount_retry(&seq, s)); * * BUG_ON(!x && !y); * } * * void write(void) * { * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * * WRITE_ONCE(X, false); * } */ #define raw_write_seqcount_barrier(s) \ do_raw_write_seqcount_barrier(seqprop_ptr(s)) static inline void do_raw_write_seqcount_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; kcsan_nestable_atomic_end(); } /** * write_seqcount_invalidate() - invalidate in-progress seqcount_t read * side operations * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * * After write_seqcount_invalidate, no seqcount_t read side operations * will complete successfully and see data older than this. */ #define write_seqcount_invalidate(s) \ do_write_seqcount_invalidate(seqprop_ptr(s)) static inline void do_write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); s->sequence+=2; kcsan_nestable_atomic_end(); } /* * Latch sequence counters (seqcount_latch_t) * * A sequence counter variant where the counter even/odd value is used to * switch between two copies of protected data. This allows the read path, * typically NMIs, to safely interrupt the write side critical section. * * As the write sections are fully preemptible, no special handling for * PREEMPT_RT is needed. */ typedef struct { seqcount_t seqcount; } seqcount_latch_t; /** * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t * @seq_name: Name of the seqcount_latch_t instance */ #define SEQCNT_LATCH_ZERO(seq_name) { \ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ } /** * seqcount_latch_init() - runtime initializer for seqcount_latch_t * @s: Pointer to the seqcount_latch_t instance */ #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount) /** * raw_read_seqcount_latch() - pick even/odd latch data copy * @s: Pointer to seqcount_latch_t * * See raw_write_seqcount_latch() for details and a full reader/writer * usage example. * * Return: sequence counter raw value. Use the lowest bit as an index for * picking which data copy to read. The full counter must then be checked * with raw_read_seqcount_latch_retry(). */ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) { /* * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). 
* Due to the dependent load, a full smp_rmb() is not needed. */ return READ_ONCE(s->seqcount.sequence); } /** * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section * @s: Pointer to seqcount_latch_t * @start: count, from raw_read_seqcount_latch() * * Return: true if a read section retry is required, else false */ static __always_inline int raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) { smp_rmb(); return unlikely(READ_ONCE(s->seqcount.sequence) != start); } /** * raw_write_seqcount_latch() - redirect latch readers to even/odd copy * @s: Pointer to seqcount_latch_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never * interrupt the modification -- e.g. the concurrency is strictly between CPUs * -- you most likely do not need this. * * Where the traditional RCU/lockless data structures rely on atomic * modifications to ensure queries observe either the old or the new state the * latch allows the same for non-atomic updates. The trade-off is doubling the * cost of storage; we have to maintain two copies of the entire data * structure. * * Very simply put: we first modify one copy and then the other. This ensures * there is always one copy in a stable state, ready to give us an answer. * * The basic form is a data structure like:: * * struct latch_struct { * seqcount_latch_t seq; * struct data_struct data[2]; * }; * * Where a modification, which is assumed to be externally serialized, does the * following:: * * void latch_modify(struct latch_struct *latch, ...) * { * smp_wmb(); // Ensure that the last data[1] update is visible * latch->seq.sequence++; * smp_wmb(); // Ensure that the seqcount update is visible * * modify(latch->data[0], ...); * * smp_wmb(); // Ensure that the data[0] update is visible * latch->seq.sequence++; * smp_wmb(); // Ensure that the seqcount update is visible * * modify(latch->data[1], ...); * } * * The query will have a form like:: * * struct entry *latch_query(struct latch_struct *latch, ...) * { * struct entry *entry; * unsigned seq, idx; * * do { * seq = raw_read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); * * // This includes needed smp_rmb() * } while (raw_read_seqcount_latch_retry(&latch->seq, seq)); * * return entry; * } * * So during the modification, queries are first redirected to data[1]. Then we * modify data[0]. When that is complete, we redirect queries back to data[0] * and we can modify data[1]. * * NOTE: * * The non-requirement for atomic modifications does _NOT_ include * the publishing of new entries in the case where data is a dynamic * data structure. * * An iteration might start in data[0] and get suspended long enough * to miss an entire modification sequence, once it resumes it might * observe the new entry. * * NOTE2: * * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
*/ static inline void raw_write_seqcount_latch(seqcount_latch_t *s) { smp_wmb(); /* prior stores before incrementing "sequence" */ s->seqcount.sequence++; smp_wmb(); /* increment "sequence" before following stores */ } #define __SEQLOCK_UNLOCKED(lockname) \ { \ .seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \ .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } /** * seqlock_init() - dynamic initializer for seqlock_t * @sl: Pointer to the seqlock_t instance */ #define seqlock_init(sl) \ do { \ spin_lock_init(&(sl)->lock); \ seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \ } while (0) /** * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t * @sl: Name of the seqlock_t instance */ #define DEFINE_SEQLOCK(sl) \ seqlock_t sl = __SEQLOCK_UNLOCKED(sl) /** * read_seqbegin() - start a seqlock_t read side critical section * @sl: Pointer to seqlock_t * * Return: count, to be passed to read_seqretry() */ static inline unsigned read_seqbegin(const seqlock_t *sl) { unsigned ret = read_seqcount_begin(&sl->seqcount); kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ kcsan_flat_atomic_begin(); return ret; } /** * read_seqretry() - end a seqlock_t read side section * @sl: Pointer to seqlock_t * @start: count, from read_seqbegin() * * read_seqretry closes the read side critical section of given seqlock_t. * If the critical section was invalid, it must be ignored (and typically * retried). * * Return: true if a read section retry is required, else false */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { /* * Assume not nested: read_seqretry() may be called multiple times when * completing read critical section. */ kcsan_flat_atomic_end(); return read_seqcount_retry(&sl->seqcount, start); } /* * For all seqlock_t write side functions, use the internal * do_write_seqcount_begin() instead of generic write_seqcount_begin(). * This way, no redundant lockdep_assert_held() checks are added. */ /** * write_seqlock() - start a seqlock_t write side critical section * @sl: Pointer to seqlock_t * * write_seqlock opens a write side critical section for the given * seqlock_t. It also implicitly acquires the spinlock_t embedded inside * that sequential lock. All seqlock_t write side sections are thus * automatically serialized and non-preemptible. * * Context: if the seqlock_t read section, or other write side critical * sections, can be invoked from hardirq or softirq contexts, use the * _irqsave or _bh variants of this function instead. */ static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); do_write_seqcount_begin(&sl->seqcount.seqcount); } /** * write_sequnlock() - end a seqlock_t write side critical section * @sl: Pointer to seqlock_t * * write_sequnlock closes the (serialized and non-preemptible) write side * critical section of given seqlock_t. */ static inline void write_sequnlock(seqlock_t *sl) { do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock(&sl->lock); } /** * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section * @sl: Pointer to seqlock_t * * _bh variant of write_seqlock(). Use only if the read side section, or * other write side sections, can be invoked from softirq contexts. 
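 *
 * Illustrative sketch (the stats_* names are hypothetical): a process
 * context writer paired with readers that may run from softirq context.
 * Disabling bottom halves prevents a softirq reader on this CPU from
 * spinning on a write section it has interrupted::
 *
 *        static DEFINE_SEQLOCK(stats_lock);
 *        static u64 stats_packets, stats_bytes;
 *
 *        static void stats_update(u64 pkts, u64 bytes)
 *        {
 *                write_seqlock_bh(&stats_lock);
 *                stats_packets += pkts;
 *                stats_bytes += bytes;
 *                write_sequnlock_bh(&stats_lock);
 *        }
 *
 *        static void stats_read(u64 *pkts, u64 *bytes)   // softirq or process context
 *        {
 *                unsigned int seq;
 *
 *                do {
 *                        seq = read_seqbegin(&stats_lock);
 *                        *pkts = stats_packets;
 *                        *bytes = stats_bytes;
 *                } while (read_seqretry(&stats_lock, seq));
 *        }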
*/ static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); do_write_seqcount_begin(&sl->seqcount.seqcount); } /** * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section * @sl: Pointer to seqlock_t * * write_sequnlock_bh closes the serialized, non-preemptible, and * softirqs-disabled, seqlock_t write side critical section opened with * write_seqlock_bh(). */ static inline void write_sequnlock_bh(seqlock_t *sl) { do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_bh(&sl->lock); } /** * write_seqlock_irq() - start a non-interruptible seqlock_t write section * @sl: Pointer to seqlock_t * * _irq variant of write_seqlock(). Use only if the read side section, or * other write sections, can be invoked from hardirq contexts. */ static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); do_write_seqcount_begin(&sl->seqcount.seqcount); } /** * write_sequnlock_irq() - end a non-interruptible seqlock_t write section * @sl: Pointer to seqlock_t * * write_sequnlock_irq closes the serialized and non-interruptible * seqlock_t write side section opened with write_seqlock_irq(). */ static inline void write_sequnlock_irq(seqlock_t *sl) { do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_irq(&sl->lock); } static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) { unsigned long flags; spin_lock_irqsave(&sl->lock, flags); do_write_seqcount_begin(&sl->seqcount.seqcount); return flags; } /** * write_seqlock_irqsave() - start a non-interruptible seqlock_t write * section * @lock: Pointer to seqlock_t * @flags: Stack-allocated storage for saving caller's local interrupt * state, to be passed to write_sequnlock_irqrestore(). * * _irqsave variant of write_seqlock(). Use it only if the read side * section, or other write sections, can be invoked from hardirq context. */ #define write_seqlock_irqsave(lock, flags) \ do { flags = __write_seqlock_irqsave(lock); } while (0) /** * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write * section * @sl: Pointer to seqlock_t * @flags: Caller's saved interrupt state, from write_seqlock_irqsave() * * write_sequnlock_irqrestore closes the serialized and non-interruptible * seqlock_t write section previously opened with write_seqlock_irqsave(). */ static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { do_write_seqcount_end(&sl->seqcount.seqcount); spin_unlock_irqrestore(&sl->lock, flags); } /** * read_seqlock_excl() - begin a seqlock_t locking reader section * @sl: Pointer to seqlock_t * * read_seqlock_excl opens a seqlock_t locking reader critical section. A * locking reader exclusively locks out *both* other writers *and* other * locking readers, but it does not update the embedded sequence number. * * Locking readers act like a normal spin_lock()/spin_unlock(). * * Context: if the seqlock_t write section, *or other read sections*, can * be invoked from hardirq or softirq contexts, use the _irqsave or _bh * variant of this function instead. * * The opened read section must be closed with read_sequnlock_excl(). */ static inline void read_seqlock_excl(seqlock_t *sl) { spin_lock(&sl->lock); } /** * read_sequnlock_excl() - end a seqlock_t locking reader critical section * @sl: Pointer to seqlock_t */ static inline void read_sequnlock_excl(seqlock_t *sl) { spin_unlock(&sl->lock); } /** * read_seqlock_excl_bh() - start a seqlock_t locking reader section with * softirqs disabled * @sl: Pointer to seqlock_t * * _bh variant of read_seqlock_excl(). 
Use this variant only if the * seqlock_t write side section, *or other read sections*, can be invoked * from softirq contexts. */ static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); } /** * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking * reader section * @sl: Pointer to seqlock_t */ static inline void read_sequnlock_excl_bh(seqlock_t *sl) { spin_unlock_bh(&sl->lock); } /** * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking * reader section * @sl: Pointer to seqlock_t * * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t * write side section, *or other read sections*, can be invoked from a * hardirq context. */ static inline void read_seqlock_excl_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); } /** * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t * locking reader section * @sl: Pointer to seqlock_t */ static inline void read_sequnlock_excl_irq(seqlock_t *sl) { spin_unlock_irq(&sl->lock); } static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) { unsigned long flags; spin_lock_irqsave(&sl->lock, flags); return flags; } /** * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t * locking reader section * @lock: Pointer to seqlock_t * @flags: Stack-allocated storage for saving caller's local interrupt * state, to be passed to read_sequnlock_excl_irqrestore(). * * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t * write side section, *or other read sections*, can be invoked from a * hardirq context. */ #define read_seqlock_excl_irqsave(lock, flags) \ do { flags = __read_seqlock_excl_irqsave(lock); } while (0) /** * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t * locking reader section * @sl: Pointer to seqlock_t * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave() */ static inline void read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) { spin_unlock_irqrestore(&sl->lock, flags); } /** * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader * @lock: Pointer to seqlock_t * @seq : Marker and return parameter. If the passed value is even, the * reader will become a *lockless* seqlock_t reader as in read_seqbegin(). * If the passed value is odd, the reader will become a *locking* reader * as in read_seqlock_excl(). In the first call to this function, the * caller *must* initialize and pass an even value to @seq; this way, a * lockless read can be optimistically tried first. * * read_seqbegin_or_lock is an API designed to optimistically try a normal * lockless seqlock_t read section first. If an odd counter is found, the * lockless read trial has failed, and the next read iteration transforms * itself into a full seqlock_t locking reader. * * This is typically used to avoid seqlock_t lockless readers starvation * (too much retry loops) in the case of a sharp spike in write side * activity. * * Context: if the seqlock_t write section, *or other read sections*, can * be invoked from hardirq or softirq contexts, use the _irqsave or _bh * variant of this function instead. * * Check Documentation/locking/seqlock.rst for template example code. * * Return: the encountered sequence counter value, through the @seq * parameter, which is overloaded as a return parameter. This returned * value must be checked with need_seqretry(). 
 * If the read section needs to be retried, this returned value must also
 * be passed as the @seq parameter of the next read_seqbegin_or_lock()
 * iteration.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

/**
 * need_seqretry() - validate seqlock_t "locking or lockless" read section
 * @lock: Pointer to seqlock_t
 * @seq: sequence count, from read_seqbegin_or_lock()
 *
 * Return: true if a read section retry is required, false otherwise
 */
static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

/**
 * done_seqretry() - end seqlock_t "locking or lockless" reader section
 * @lock: Pointer to seqlock_t
 * @seq: count, from read_seqbegin_or_lock()
 *
 * done_seqretry finishes the seqlock_t read side critical section started
 * with read_seqbegin_or_lock() and validated by need_seqretry().
 */
static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

/**
 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
 *                                   a non-interruptible locking reader
 * @lock: Pointer to seqlock_t
 * @seq: Marker and return parameter. Check read_seqbegin_or_lock().
 *
 * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if
 * the seqlock_t write section, *or other read sections*, can be invoked
 * from hardirq context.
 *
 * Note: Interrupts will be disabled only for "locking reader" mode.
 *
 * Return:
 *
 * 1. The saved local interrupts state in case of a locking reader, to
 *    be passed to done_seqretry_irqrestore().
 *
 * 2. The encountered sequence counter value, returned through @seq
 *    overloaded as a return parameter. Check read_seqbegin_or_lock().
 */
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
{
	unsigned long flags = 0;

	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl_irqsave(lock, flags);

	return flags;
}

/**
 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
 *                              non-interruptible locking reader section
 * @lock: Pointer to seqlock_t
 * @seq: Count, from read_seqbegin_or_lock_irqsave()
 * @flags: Caller's saved local interrupt state in case of a locking
 *         reader, also from read_seqbegin_or_lock_irqsave()
 *
 * This is the _irqrestore variant of done_seqretry(). The read section
 * must've been opened with read_seqbegin_or_lock_irqsave(), and validated
 * by need_seqretry().
 */
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
{
	if (seq & 1)
		read_sequnlock_excl_irqrestore(lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */
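/*
 * Illustrative sketch (not part of the interface above): a rough outline
 * of the conditional lockless-or-locking reader loop built from
 * read_seqbegin_or_lock(), need_seqretry() and done_seqretry(); foo_lock
 * is a hypothetical seqlock_t, and Documentation/locking/seqlock.rst
 * carries the reference template::
 *
 *        int seq = 0;    // even: start as an optimistic lockless reader
 *
 *        do {
 *                read_seqbegin_or_lock(&foo_lock, &seq);
 *
 *                // ... read the data protected by foo_lock ...
 *
 *        } while (need_seqretry(&foo_lock, seq));
 *        done_seqretry(&foo_lock, seq);
 */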
5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 
6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 
7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>
#include <linux/fsverity.h>

#include "send.h"
#include "ctree.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
#include "xattr.h"
#include "print-tree.h"
#include "accessors.h"
#include "dir-item.h"
#include "file-item.h"
#include "ioctl.h"
#include "verity.h"
#include "lru_cache.h"

/*
 * Maximum number of references an extent can have in order for us to attempt to
 * issue clone operations instead of write operations. This currently exists to
 * avoid hitting limitations of the backreference walking code (taking a lot of
 * time and using too much memory for extents with large number of references).
 */
#define SEND_MAX_EXTENT_REFS	1024

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
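 *
 * Illustrative usage sketch (editor's addition, not part of the original
 * source; it only uses the fs_path helpers defined further below):
 *
 *	struct fs_path *p = fs_path_alloc();
 *
 *	if (p) {
 *		fs_path_add(p, "dir", 3);
 *		fs_path_add(p, "file", 4);	(p->start is now "dir/file")
 *		fs_path_free(p);
 *	}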
*/ struct fs_path { union { struct { char *start; char *end; char *buf; unsigned short buf_len:15; unsigned short reversed:1; char inline_buf[]; }; /* * Average path length does not exceed 200 bytes, we'll have * better packing in the slab and higher chance to satisfy * a allocation later during send. */ char pad[256]; }; }; #define FS_PATH_INLINE_SIZE \ (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf)) /* reused for each extent */ struct clone_root { struct btrfs_root *root; u64 ino; u64 offset; u64 num_bytes; bool found_ref; }; #define SEND_MAX_NAME_CACHE_SIZE 256 /* * Limit the root_ids array of struct backref_cache_entry to 17 elements. * This makes the size of a cache entry to be exactly 192 bytes on x86_64, which * can be satisfied from the kmalloc-192 slab, without wasting any space. * The most common case is to have a single root for cloning, which corresponds * to the send root. Having the user specify more than 16 clone roots is not * common, and in such rare cases we simply don't use caching if the number of * cloning roots that lead down to a leaf is more than 17. */ #define SEND_MAX_BACKREF_CACHE_ROOTS 17 /* * Max number of entries in the cache. * With SEND_MAX_BACKREF_CACHE_ROOTS as 17, the size in bytes, excluding * maple tree's internal nodes, is 24K. */ #define SEND_MAX_BACKREF_CACHE_SIZE 128 /* * A backref cache entry maps a leaf to a list of IDs of roots from which the * leaf is accessible and we can use for clone operations. * With SEND_MAX_BACKREF_CACHE_ROOTS as 12, each cache entry is 128 bytes (on * x86_64). */ struct backref_cache_entry { struct btrfs_lru_cache_entry entry; u64 root_ids[SEND_MAX_BACKREF_CACHE_ROOTS]; /* Number of valid elements in the root_ids array. */ int num_roots; }; /* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */ static_assert(offsetof(struct backref_cache_entry, entry) == 0); /* * Max number of entries in the cache that stores directories that were already * created. The cache uses raw struct btrfs_lru_cache_entry entries, so it uses * at most 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but * the kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64). */ #define SEND_MAX_DIR_CREATED_CACHE_SIZE 64 /* * Max number of entries in the cache that stores directories that were already * created. The cache uses raw struct btrfs_lru_cache_entry entries, so it uses * at most 4096 bytes - sizeof(struct btrfs_lru_cache_entry) is 48 bytes, but * the kmalloc-64 slab is used, so we get 4096 bytes (64 bytes * 64). */ #define SEND_MAX_DIR_UTIMES_CACHE_SIZE 64 struct send_ctx { struct file *send_filp; loff_t send_off; char *send_buf; u32 send_size; u32 send_max_size; /* * Whether BTRFS_SEND_A_DATA attribute was already added to current * command (since protocol v2, data must be the last attribute). */ bool put_data; struct page **send_buf_pages; u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */ /* Protocol version compatibility requested */ u32 proto; struct btrfs_root *send_root; struct btrfs_root *parent_root; struct clone_root *clone_roots; int clone_roots_cnt; /* current state of the compare_tree call */ struct btrfs_path *left_path; struct btrfs_path *right_path; struct btrfs_key *cmp_key; /* * Keep track of the generation of the last transaction that was used * for relocating a block group. 
This is periodically checked in order * to detect if a relocation happened since the last check, so that we * don't operate on stale extent buffers for nodes (level >= 1) or on * stale disk_bytenr values of file extent items. */ u64 last_reloc_trans; /* * infos of the currently processed inode. In case of deleted inodes, * these are the values from the deleted inode. */ u64 cur_ino; u64 cur_inode_gen; u64 cur_inode_size; u64 cur_inode_mode; u64 cur_inode_rdev; u64 cur_inode_last_extent; u64 cur_inode_next_write_offset; bool cur_inode_new; bool cur_inode_new_gen; bool cur_inode_deleted; bool ignore_cur_inode; bool cur_inode_needs_verity; void *verity_descriptor; u64 send_progress; struct list_head new_refs; struct list_head deleted_refs; struct btrfs_lru_cache name_cache; /* * The inode we are currently processing. It's not NULL only when we * need to issue write commands for data extents from this inode. */ struct inode *cur_inode; struct file_ra_state ra; u64 page_cache_clear_start; bool clean_page_cache; /* * We process inodes by their increasing order, so if before an * incremental send we reverse the parent/child relationship of * directories such that a directory with a lower inode number was * the parent of a directory with a higher inode number, and the one * becoming the new parent got renamed too, we can't rename/move the * directory with lower inode number when we finish processing it - we * must process the directory with higher inode number first, then * rename/move it and then rename/move the directory with lower inode * number. Example follows. * * Tree state when the first send was performed: * * . * |-- a (ino 257) * |-- b (ino 258) * | * | * |-- c (ino 259) * | |-- d (ino 260) * | * |-- c2 (ino 261) * * Tree state when the second (incremental) send is performed: * * . * |-- a (ino 257) * |-- b (ino 258) * |-- c2 (ino 261) * |-- d2 (ino 260) * |-- cc (ino 259) * * The sequence of steps that lead to the second state was: * * mv /a/b/c/d /a/b/c2/d2 * mv /a/b/c /a/b/c2/d2/cc * * "c" has lower inode number, but we can't move it (2nd mv operation) * before we move "d", which has higher inode number. * * So we just memorize which move/rename operations must be performed * later when their respective parent is processed and moved/renamed. */ /* Indexed by parent directory inode number. */ struct rb_root pending_dir_moves; /* * Reverse index, indexed by the inode number of a directory that * is waiting for the move/rename of its immediate parent before its * own move/rename can be performed. */ struct rb_root waiting_dir_moves; /* * A directory that is going to be rm'ed might have a child directory * which is in the pending directory moves index above. In this case, * the directory can only be removed after the move/rename of its child * is performed. Example: * * Parent snapshot: * * . (ino 256) * |-- a/ (ino 257) * |-- b/ (ino 258) * |-- c/ (ino 259) * | |-- x/ (ino 260) * | * |-- y/ (ino 261) * * Send snapshot: * * . 
(ino 256) * |-- a/ (ino 257) * |-- b/ (ino 258) * |-- YY/ (ino 261) * |-- x/ (ino 260) * * Sequence of steps that lead to the send snapshot: * rm -f /a/b/c/foo.txt * mv /a/b/y /a/b/YY * mv /a/b/c/x /a/b/YY * rmdir /a/b/c * * When the child is processed, its move/rename is delayed until its * parent is processed (as explained above), but all other operations * like update utimes, chown, chgrp, etc, are performed and the paths * that it uses for those operations must use the orphanized name of * its parent (the directory we're going to rm later), so we need to * memorize that name. * * Indexed by the inode number of the directory to be deleted. */ struct rb_root orphan_dirs; struct rb_root rbtree_new_refs; struct rb_root rbtree_deleted_refs; struct btrfs_lru_cache backref_cache; u64 backref_cache_last_reloc_trans; struct btrfs_lru_cache dir_created_cache; struct btrfs_lru_cache dir_utimes_cache; }; struct pending_dir_move { struct rb_node node; struct list_head list; u64 parent_ino; u64 ino; u64 gen; struct list_head update_refs; }; struct waiting_dir_move { struct rb_node node; u64 ino; /* * There might be some directory that could not be removed because it * was waiting for this directory inode to be moved first. Therefore * after this directory is moved, we can try to rmdir the ino rmdir_ino. */ u64 rmdir_ino; u64 rmdir_gen; bool orphanized; }; struct orphan_dir_info { struct rb_node node; u64 ino; u64 gen; u64 last_dir_index_offset; u64 dir_high_seq_ino; }; struct name_cache_entry { /* * The key in the entry is an inode number, and the generation matches * the inode's generation. */ struct btrfs_lru_cache_entry entry; u64 parent_ino; u64 parent_gen; int ret; int need_later_update; int name_len; char name[]; }; /* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */ static_assert(offsetof(struct name_cache_entry, entry) == 0); #define ADVANCE 1 #define ADVANCE_ONLY_NEXT -1 enum btrfs_compare_tree_result { BTRFS_COMPARE_TREE_NEW, BTRFS_COMPARE_TREE_DELETED, BTRFS_COMPARE_TREE_CHANGED, BTRFS_COMPARE_TREE_SAME, }; __cold static void inconsistent_snapshot_error(struct send_ctx *sctx, enum btrfs_compare_tree_result result, const char *what) { const char *result_string; switch (result) { case BTRFS_COMPARE_TREE_NEW: result_string = "new"; break; case BTRFS_COMPARE_TREE_DELETED: result_string = "deleted"; break; case BTRFS_COMPARE_TREE_CHANGED: result_string = "updated"; break; case BTRFS_COMPARE_TREE_SAME: ASSERT(0); result_string = "unchanged"; break; default: ASSERT(0); result_string = "unexpected"; } btrfs_err(sctx->send_root->fs_info, "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu", result_string, what, sctx->cmp_key->objectid, sctx->send_root->root_key.objectid, (sctx->parent_root ? 
sctx->parent_root->root_key.objectid : 0)); } __maybe_unused static bool proto_cmd_ok(const struct send_ctx *sctx, int cmd) { switch (sctx->proto) { case 1: return cmd <= BTRFS_SEND_C_MAX_V1; case 2: return cmd <= BTRFS_SEND_C_MAX_V2; case 3: return cmd <= BTRFS_SEND_C_MAX_V3; default: return false; } } static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); static struct waiting_dir_move * get_waiting_dir_move(struct send_ctx *sctx, u64 ino); static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen); static int need_send_hole(struct send_ctx *sctx) { return (sctx->parent_root && !sctx->cur_inode_new && !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted && S_ISREG(sctx->cur_inode_mode)); } static void fs_path_reset(struct fs_path *p) { if (p->reversed) { p->start = p->buf + p->buf_len - 1; p->end = p->start; *p->start = 0; } else { p->start = p->buf; p->end = p->start; *p->start = 0; } } static struct fs_path *fs_path_alloc(void) { struct fs_path *p; p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return NULL; p->reversed = 0; p->buf = p->inline_buf; p->buf_len = FS_PATH_INLINE_SIZE; fs_path_reset(p); return p; } static struct fs_path *fs_path_alloc_reversed(void) { struct fs_path *p; p = fs_path_alloc(); if (!p) return NULL; p->reversed = 1; fs_path_reset(p); return p; } static void fs_path_free(struct fs_path *p) { if (!p) return; if (p->buf != p->inline_buf) kfree(p->buf); kfree(p); } static int fs_path_len(struct fs_path *p) { return p->end - p->start; } static int fs_path_ensure_buf(struct fs_path *p, int len) { char *tmp_buf; int path_len; int old_buf_len; len++; if (p->buf_len >= len) return 0; if (len > PATH_MAX) { WARN_ON(1); return -ENOMEM; } path_len = p->end - p->start; old_buf_len = p->buf_len; /* * Allocate to the next largest kmalloc bucket size, to let * the fast path happen most of the time. 
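 *
 * For example (editor's illustration, assuming the usual x86_64 slab bucket
 * sizes): a request for 70 bytes is rounded up to 96 and one for 130 bytes
 * to 192, so later small appends to the path still fit in the already
 * allocated buffer and take the "p->buf_len >= len" fast path above.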
*/ len = kmalloc_size_roundup(len); /* * First time the inline_buf does not suffice */ if (p->buf == p->inline_buf) { tmp_buf = kmalloc(len, GFP_KERNEL); if (tmp_buf) memcpy(tmp_buf, p->buf, old_buf_len); } else { tmp_buf = krealloc(p->buf, len, GFP_KERNEL); } if (!tmp_buf) return -ENOMEM; p->buf = tmp_buf; p->buf_len = len; if (p->reversed) { tmp_buf = p->buf + old_buf_len - path_len - 1; p->end = p->buf + p->buf_len - 1; p->start = p->end - path_len; memmove(p->start, tmp_buf, path_len + 1); } else { p->start = p->buf; p->end = p->start + path_len; } return 0; } static int fs_path_prepare_for_add(struct fs_path *p, int name_len, char **prepared) { int ret; int new_len; new_len = p->end - p->start + name_len; if (p->start != p->end) new_len++; ret = fs_path_ensure_buf(p, new_len); if (ret < 0) goto out; if (p->reversed) { if (p->start != p->end) *--p->start = '/'; p->start -= name_len; *prepared = p->start; } else { if (p->start != p->end) *p->end++ = '/'; *prepared = p->end; p->end += name_len; *p->end = 0; } out: return ret; } static int fs_path_add(struct fs_path *p, const char *name, int name_len) { int ret; char *prepared; ret = fs_path_prepare_for_add(p, name_len, &prepared); if (ret < 0) goto out; memcpy(prepared, name, name_len); out: return ret; } static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) { int ret; char *prepared; ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared); if (ret < 0) goto out; memcpy(prepared, p2->start, p2->end - p2->start); out: return ret; } static int fs_path_add_from_extent_buffer(struct fs_path *p, struct extent_buffer *eb, unsigned long off, int len) { int ret; char *prepared; ret = fs_path_prepare_for_add(p, len, &prepared); if (ret < 0) goto out; read_extent_buffer(eb, prepared, off, len); out: return ret; } static int fs_path_copy(struct fs_path *p, struct fs_path *from) { p->reversed = from->reversed; fs_path_reset(p); return fs_path_add_path(p, from); } static void fs_path_unreverse(struct fs_path *p) { char *tmp; int len; if (!p->reversed) return; tmp = p->start; len = p->end - p->start; p->start = p->buf; p->end = p->start + len; memmove(p->start, tmp, len + 1); p->reversed = 0; } static struct btrfs_path *alloc_path_for_send(void) { struct btrfs_path *path; path = btrfs_alloc_path(); if (!path) return NULL; path->search_commit_root = 1; path->skip_locking = 1; path->need_commit_sem = 1; return path; } static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) { int ret; u32 pos = 0; while (pos < len) { ret = kernel_write(filp, buf + pos, len - pos, off); if (ret < 0) return ret; if (ret == 0) return -EIO; pos += ret; } return 0; } static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len) { struct btrfs_tlv_header *hdr; int total_len = sizeof(*hdr) + len; int left = sctx->send_max_size - sctx->send_size; if (WARN_ON_ONCE(sctx->put_data)) return -EINVAL; if (unlikely(left < total_len)) return -EOVERFLOW; hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size); put_unaligned_le16(attr, &hdr->tlv_type); put_unaligned_le16(len, &hdr->tlv_len); memcpy(hdr + 1, data, len); sctx->send_size += total_len; return 0; } #define TLV_PUT_DEFINE_INT(bits) \ static int tlv_put_u##bits(struct send_ctx *sctx, \ u##bits attr, u##bits value) \ { \ __le##bits __tmp = cpu_to_le##bits(value); \ return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \ } TLV_PUT_DEFINE_INT(8) TLV_PUT_DEFINE_INT(32) TLV_PUT_DEFINE_INT(64) static int tlv_put_string(struct send_ctx *sctx, u16 attr, const char 
*str, int len) { if (len == -1) len = strlen(str); return tlv_put(sctx, attr, str, len); } static int tlv_put_uuid(struct send_ctx *sctx, u16 attr, const u8 *uuid) { return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE); } static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, struct extent_buffer *eb, struct btrfs_timespec *ts) { struct btrfs_timespec bts; read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); return tlv_put(sctx, attr, &bts, sizeof(bts)); } #define TLV_PUT(sctx, attrtype, data, attrlen) \ do { \ ret = tlv_put(sctx, attrtype, data, attrlen); \ if (ret < 0) \ goto tlv_put_failure; \ } while (0) #define TLV_PUT_INT(sctx, attrtype, bits, value) \ do { \ ret = tlv_put_u##bits(sctx, attrtype, value); \ if (ret < 0) \ goto tlv_put_failure; \ } while (0) #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data) #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data) #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data) #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data) #define TLV_PUT_STRING(sctx, attrtype, str, len) \ do { \ ret = tlv_put_string(sctx, attrtype, str, len); \ if (ret < 0) \ goto tlv_put_failure; \ } while (0) #define TLV_PUT_PATH(sctx, attrtype, p) \ do { \ ret = tlv_put_string(sctx, attrtype, p->start, \ p->end - p->start); \ if (ret < 0) \ goto tlv_put_failure; \ } while(0) #define TLV_PUT_UUID(sctx, attrtype, uuid) \ do { \ ret = tlv_put_uuid(sctx, attrtype, uuid); \ if (ret < 0) \ goto tlv_put_failure; \ } while (0) #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \ do { \ ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \ if (ret < 0) \ goto tlv_put_failure; \ } while (0) static int send_header(struct send_ctx *sctx) { struct btrfs_stream_header hdr; strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); hdr.version = cpu_to_le32(sctx->proto); return write_buf(sctx->send_filp, &hdr, sizeof(hdr), &sctx->send_off); } /* * For each command/item we want to send to userspace, we call this function. 
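 *
 * The usual calling pattern, mirrored by the send_* helpers below (editor's
 * sketch, not part of the original source; it sits inside a function that
 * defines the tlv_put_failure label used by the TLV_PUT_* macros):
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
 *	if (ret < 0)
 *		goto out;
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
 *	ret = send_cmd(sctx);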
*/ static int begin_cmd(struct send_ctx *sctx, int cmd) { struct btrfs_cmd_header *hdr; if (WARN_ON(!sctx->send_buf)) return -EINVAL; BUG_ON(sctx->send_size); sctx->send_size += sizeof(*hdr); hdr = (struct btrfs_cmd_header *)sctx->send_buf; put_unaligned_le16(cmd, &hdr->cmd); return 0; } static int send_cmd(struct send_ctx *sctx) { int ret; struct btrfs_cmd_header *hdr; u32 crc; hdr = (struct btrfs_cmd_header *)sctx->send_buf; put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len); put_unaligned_le32(0, &hdr->crc); crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); put_unaligned_le32(crc, &hdr->crc); ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, &sctx->send_off); sctx->send_size = 0; sctx->put_data = false; return ret; } /* * Sends a move instruction to user space */ static int send_rename(struct send_ctx *sctx, struct fs_path *from, struct fs_path *to) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } /* * Sends a link instruction to user space */ static int send_link(struct send_ctx *sctx, struct fs_path *path, struct fs_path *lnk) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start); ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } /* * Sends an unlink instruction to user space */ static int send_unlink(struct send_ctx *sctx, struct fs_path *path) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; btrfs_debug(fs_info, "send_unlink %s", path->start); ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } /* * Sends a rmdir instruction to user space */ static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; btrfs_debug(fs_info, "send_rmdir %s", path->start); ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } struct btrfs_inode_info { u64 size; u64 gen; u64 mode; u64 uid; u64 gid; u64 rdev; u64 fileattr; u64 nlink; }; /* * Helper function to retrieve some fields from an inode item. 
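 *
 * Usage sketch (editor's example):
 *
 *	struct btrfs_inode_info info = { 0 };
 *
 *	ret = get_inode_info(root, ino, &info);
 *	if (ret == 0)
 *		use info.gen, info.size, info.mode, info.uid, info.gid, ...
 *	(ret is -ENOENT if the inode item does not exist)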
*/ static int get_inode_info(struct btrfs_root *root, u64 ino, struct btrfs_inode_info *info) { int ret; struct btrfs_path *path; struct btrfs_inode_item *ii; struct btrfs_key key; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret) { if (ret > 0) ret = -ENOENT; goto out; } if (!info) goto out; ii = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); info->size = btrfs_inode_size(path->nodes[0], ii); info->gen = btrfs_inode_generation(path->nodes[0], ii); info->mode = btrfs_inode_mode(path->nodes[0], ii); info->uid = btrfs_inode_uid(path->nodes[0], ii); info->gid = btrfs_inode_gid(path->nodes[0], ii); info->rdev = btrfs_inode_rdev(path->nodes[0], ii); info->nlink = btrfs_inode_nlink(path->nodes[0], ii); /* * Transfer the unchanged u64 value of btrfs_inode_item::flags, that's * otherwise logically split to 32/32 parts. */ info->fileattr = btrfs_inode_flags(path->nodes[0], ii); out: btrfs_free_path(path); return ret; } static int get_inode_gen(struct btrfs_root *root, u64 ino, u64 *gen) { int ret; struct btrfs_inode_info info = { 0 }; ASSERT(gen); ret = get_inode_info(root, ino, &info); *gen = info.gen; return ret; } typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index, struct fs_path *p, void *ctx); /* * Helper function to iterate the entries in ONE btrfs_inode_ref or * btrfs_inode_extref. * The iterate callback may return a non zero value to stop iteration. This can * be a negative value for error codes or 1 to simply stop it. * * path must point to the INODE_REF or INODE_EXTREF when called. */ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *found_key, int resolve, iterate_inode_ref_t iterate, void *ctx) { struct extent_buffer *eb = path->nodes[0]; struct btrfs_inode_ref *iref; struct btrfs_inode_extref *extref; struct btrfs_path *tmp_path; struct fs_path *p; u32 cur = 0; u32 total; int slot = path->slots[0]; u32 name_len; char *start; int ret = 0; int num = 0; int index; u64 dir; unsigned long name_off; unsigned long elem_size; unsigned long ptr; p = fs_path_alloc_reversed(); if (!p) return -ENOMEM; tmp_path = alloc_path_for_send(); if (!tmp_path) { fs_path_free(p); return -ENOMEM; } if (found_key->type == BTRFS_INODE_REF_KEY) { ptr = (unsigned long)btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); total = btrfs_item_size(eb, slot); elem_size = sizeof(*iref); } else { ptr = btrfs_item_ptr_offset(eb, slot); total = btrfs_item_size(eb, slot); elem_size = sizeof(*extref); } while (cur < total) { fs_path_reset(p); if (found_key->type == BTRFS_INODE_REF_KEY) { iref = (struct btrfs_inode_ref *)(ptr + cur); name_len = btrfs_inode_ref_name_len(eb, iref); name_off = (unsigned long)(iref + 1); index = btrfs_inode_ref_index(eb, iref); dir = found_key->offset; } else { extref = (struct btrfs_inode_extref *)(ptr + cur); name_len = btrfs_inode_extref_name_len(eb, extref); name_off = (unsigned long)&extref->name; index = btrfs_inode_extref_index(eb, extref); dir = btrfs_inode_extref_parent(eb, extref); } if (resolve) { start = btrfs_ref_to_path(root, tmp_path, name_len, name_off, eb, dir, p->buf, p->buf_len); if (IS_ERR(start)) { ret = PTR_ERR(start); goto out; } if (start < p->buf) { /* overflow , try again with larger buffer */ ret = fs_path_ensure_buf(p, p->buf_len + p->buf - start); if (ret < 0) goto out; start = btrfs_ref_to_path(root, tmp_path, name_len, name_off, eb, dir, p->buf, 
p->buf_len); if (IS_ERR(start)) { ret = PTR_ERR(start); goto out; } BUG_ON(start < p->buf); } p->start = start; } else { ret = fs_path_add_from_extent_buffer(p, eb, name_off, name_len); if (ret < 0) goto out; } cur += elem_size + name_len; ret = iterate(num, dir, index, p, ctx); if (ret) goto out; num++; } out: btrfs_free_path(tmp_path); fs_path_free(p); return ret; } typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx); /* * Helper function to iterate the entries in ONE btrfs_dir_item. * The iterate callback may return a non zero value to stop iteration. This can * be a negative value for error codes or 1 to simply stop it. * * path must point to the dir item when called. */ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, iterate_dir_item_t iterate, void *ctx) { int ret = 0; struct extent_buffer *eb; struct btrfs_dir_item *di; struct btrfs_key di_key; char *buf = NULL; int buf_len; u32 name_len; u32 data_len; u32 cur; u32 len; u32 total; int slot; int num; /* * Start with a small buffer (1 page). If later we end up needing more * space, which can happen for xattrs on a fs with a leaf size greater * then the page size, attempt to increase the buffer. Typically xattr * values are small. */ buf_len = PATH_MAX; buf = kmalloc(buf_len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } eb = path->nodes[0]; slot = path->slots[0]; di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); cur = 0; len = 0; total = btrfs_item_size(eb, slot); num = 0; while (cur < total) { name_len = btrfs_dir_name_len(eb, di); data_len = btrfs_dir_data_len(eb, di); btrfs_dir_item_key_to_cpu(eb, di, &di_key); if (btrfs_dir_ftype(eb, di) == BTRFS_FT_XATTR) { if (name_len > XATTR_NAME_MAX) { ret = -ENAMETOOLONG; goto out; } if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) { ret = -E2BIG; goto out; } } else { /* * Path too long */ if (name_len + data_len > PATH_MAX) { ret = -ENAMETOOLONG; goto out; } } if (name_len + data_len > buf_len) { buf_len = name_len + data_len; if (is_vmalloc_addr(buf)) { vfree(buf); buf = NULL; } else { char *tmp = krealloc(buf, buf_len, GFP_KERNEL | __GFP_NOWARN); if (!tmp) kfree(buf); buf = tmp; } if (!buf) { buf = kvmalloc(buf_len, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } } } read_extent_buffer(eb, buf, (unsigned long)(di + 1), name_len + data_len); len = sizeof(*di) + name_len + data_len; di = (struct btrfs_dir_item *)((char *)di + len); cur += len; ret = iterate(num, &di_key, buf, name_len, buf + name_len, data_len, ctx); if (ret < 0) goto out; if (ret) { ret = 0; goto out; } num++; } out: kvfree(buf); return ret; } static int __copy_first_ref(int num, u64 dir, int index, struct fs_path *p, void *ctx) { int ret; struct fs_path *pt = ctx; ret = fs_path_copy(pt, p); if (ret < 0) return ret; /* we want the first only */ return 1; } /* * Retrieve the first path of an inode. If an inode has more then one * ref/hardlink, this is ignored. 
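 *
 * Usage sketch (editor's example):
 *
 *	struct fs_path *p = fs_path_alloc();
 *
 *	if (p) {
 *		ret = get_inode_path(root, ino, p);
 *		(on success p holds the path built from the inode's first ref)
 *		fs_path_free(p);
 *	}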
*/ static int get_inode_path(struct btrfs_root *root, u64 ino, struct fs_path *path) { int ret; struct btrfs_key key, found_key; struct btrfs_path *p; p = alloc_path_for_send(); if (!p) return -ENOMEM; fs_path_reset(path); key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = 0; ret = btrfs_search_slot_for_read(root, &key, p, 1, 0); if (ret < 0) goto out; if (ret) { ret = 1; goto out; } btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); if (found_key.objectid != ino || (found_key.type != BTRFS_INODE_REF_KEY && found_key.type != BTRFS_INODE_EXTREF_KEY)) { ret = -ENOENT; goto out; } ret = iterate_inode_ref(root, p, &found_key, 1, __copy_first_ref, path); if (ret < 0) goto out; ret = 0; out: btrfs_free_path(p); return ret; } struct backref_ctx { struct send_ctx *sctx; /* number of total found references */ u64 found; /* * used for clones found in send_root. clones found behind cur_objectid * and cur_offset are not considered as allowed clones. */ u64 cur_objectid; u64 cur_offset; /* may be truncated in case it's the last extent in a file */ u64 extent_len; /* The bytenr the file extent item we are processing refers to. */ u64 bytenr; /* The owner (root id) of the data backref for the current extent. */ u64 backref_owner; /* The offset of the data backref for the current extent. */ u64 backref_offset; }; static int __clone_root_cmp_bsearch(const void *key, const void *elt) { u64 root = (u64)(uintptr_t)key; const struct clone_root *cr = elt; if (root < cr->root->root_key.objectid) return -1; if (root > cr->root->root_key.objectid) return 1; return 0; } static int __clone_root_cmp_sort(const void *e1, const void *e2) { const struct clone_root *cr1 = e1; const struct clone_root *cr2 = e2; if (cr1->root->root_key.objectid < cr2->root->root_key.objectid) return -1; if (cr1->root->root_key.objectid > cr2->root->root_key.objectid) return 1; return 0; } /* * Called for every backref that is found for the current extent. * Results are collected in sctx->clone_roots->ino/offset. */ static int iterate_backrefs(u64 ino, u64 offset, u64 num_bytes, u64 root_id, void *ctx_) { struct backref_ctx *bctx = ctx_; struct clone_root *clone_root; /* First check if the root is in the list of accepted clone sources */ clone_root = bsearch((void *)(uintptr_t)root_id, bctx->sctx->clone_roots, bctx->sctx->clone_roots_cnt, sizeof(struct clone_root), __clone_root_cmp_bsearch); if (!clone_root) return 0; /* This is our own reference, bail out as we can't clone from it. */ if (clone_root->root == bctx->sctx->send_root && ino == bctx->cur_objectid && offset == bctx->cur_offset) return 0; /* * Make sure we don't consider clones from send_root that are * behind the current inode/offset. */ if (clone_root->root == bctx->sctx->send_root) { /* * If the source inode was not yet processed we can't issue a * clone operation, as the source extent does not exist yet at * the destination of the stream. */ if (ino > bctx->cur_objectid) return 0; /* * We clone from the inode currently being sent as long as the * source extent is already processed, otherwise we could try * to clone from an extent that does not exist yet at the * destination of the stream. 
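 *
 * Worked example (editor's illustration): with cur_inode_next_write_offset
 * at 1 MiB, a candidate source range in this same inode starting at offset
 * 768 KiB with an extent_len of 512 KiB ends at 1280 KiB, which is past
 * what was already sent, so the check below rejects it.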
*/ if (ino == bctx->cur_objectid && offset + bctx->extent_len > bctx->sctx->cur_inode_next_write_offset) return 0; } bctx->found++; clone_root->found_ref = true; /* * If the given backref refers to a file extent item with a larger * number of bytes than what we found before, use the new one so that * we clone more optimally and end up doing less writes and getting * less exclusive, non-shared extents at the destination. */ if (num_bytes > clone_root->num_bytes) { clone_root->ino = ino; clone_root->offset = offset; clone_root->num_bytes = num_bytes; /* * Found a perfect candidate, so there's no need to continue * backref walking. */ if (num_bytes >= bctx->extent_len) return BTRFS_ITERATE_EXTENT_INODES_STOP; } return 0; } static bool lookup_backref_cache(u64 leaf_bytenr, void *ctx, const u64 **root_ids_ret, int *root_count_ret) { struct backref_ctx *bctx = ctx; struct send_ctx *sctx = bctx->sctx; struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; const u64 key = leaf_bytenr >> fs_info->sectorsize_bits; struct btrfs_lru_cache_entry *raw_entry; struct backref_cache_entry *entry; if (btrfs_lru_cache_size(&sctx->backref_cache) == 0) return false; /* * If relocation happened since we first filled the cache, then we must * empty the cache and can not use it, because even though we operate on * read-only roots, their leaves and nodes may have been reallocated and * now be used for different nodes/leaves of the same tree or some other * tree. * * We are called from iterate_extent_inodes() while either holding a * transaction handle or holding fs_info->commit_root_sem, so no need * to take any lock here. */ if (fs_info->last_reloc_trans > sctx->backref_cache_last_reloc_trans) { btrfs_lru_cache_clear(&sctx->backref_cache); return false; } raw_entry = btrfs_lru_cache_lookup(&sctx->backref_cache, key, 0); if (!raw_entry) return false; entry = container_of(raw_entry, struct backref_cache_entry, entry); *root_ids_ret = entry->root_ids; *root_count_ret = entry->num_roots; return true; } static void store_backref_cache(u64 leaf_bytenr, const struct ulist *root_ids, void *ctx) { struct backref_ctx *bctx = ctx; struct send_ctx *sctx = bctx->sctx; struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; struct backref_cache_entry *new_entry; struct ulist_iterator uiter; struct ulist_node *node; int ret; /* * We're called while holding a transaction handle or while holding * fs_info->commit_root_sem (at iterate_extent_inodes()), so must do a * NOFS allocation. */ new_entry = kmalloc(sizeof(struct backref_cache_entry), GFP_NOFS); /* No worries, cache is optional. */ if (!new_entry) return; new_entry->entry.key = leaf_bytenr >> fs_info->sectorsize_bits; new_entry->entry.gen = 0; new_entry->num_roots = 0; ULIST_ITER_INIT(&uiter); while ((node = ulist_next(root_ids, &uiter)) != NULL) { const u64 root_id = node->val; struct clone_root *root; root = bsearch((void *)(uintptr_t)root_id, sctx->clone_roots, sctx->clone_roots_cnt, sizeof(struct clone_root), __clone_root_cmp_bsearch); if (!root) continue; /* Too many roots, just exit, no worries as caching is optional. */ if (new_entry->num_roots >= SEND_MAX_BACKREF_CACHE_ROOTS) { kfree(new_entry); return; } new_entry->root_ids[new_entry->num_roots] = root_id; new_entry->num_roots++; } /* * We may have not added any roots to the new cache entry, which means * none of the roots is part of the list of roots from which we are * allowed to clone. Cache the new entry as it's still useful to avoid * backref walking to determine which roots have a path to the leaf. 
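 *
 * (Editor's note, for illustration: the cache key is the leaf bytenr shifted
 * right by sectorsize_bits, so with a 4 KiB sectorsize a leaf at bytenr
 * 30408704 is stored under key 30408704 >> 12 == 7424, matching the lookup
 * done in lookup_backref_cache() above.)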
* * Also use GFP_NOFS because we're called while holding a transaction * handle or while holding fs_info->commit_root_sem. */ ret = btrfs_lru_cache_store(&sctx->backref_cache, &new_entry->entry, GFP_NOFS); ASSERT(ret == 0 || ret == -ENOMEM); if (ret) { /* Caching is optional, no worries. */ kfree(new_entry); return; } /* * We are called from iterate_extent_inodes() while either holding a * transaction handle or holding fs_info->commit_root_sem, so no need * to take any lock here. */ if (btrfs_lru_cache_size(&sctx->backref_cache) == 1) sctx->backref_cache_last_reloc_trans = fs_info->last_reloc_trans; } static int check_extent_item(u64 bytenr, const struct btrfs_extent_item *ei, const struct extent_buffer *leaf, void *ctx) { const u64 refs = btrfs_extent_refs(leaf, ei); const struct backref_ctx *bctx = ctx; const struct send_ctx *sctx = bctx->sctx; if (bytenr == bctx->bytenr) { const u64 flags = btrfs_extent_flags(leaf, ei); if (WARN_ON(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) return -EUCLEAN; /* * If we have only one reference and only the send root as a * clone source - meaning no clone roots were given in the * struct btrfs_ioctl_send_args passed to the send ioctl - then * it's our reference and there's no point in doing backref * walking which is expensive, so exit early. */ if (refs == 1 && sctx->clone_roots_cnt == 1) return -ENOENT; } /* * Backreference walking (iterate_extent_inodes() below) is currently * too expensive when an extent has a large number of references, both * in time spent and used memory. So for now just fallback to write * operations instead of clone operations when an extent has more than * a certain amount of references. */ if (refs > SEND_MAX_EXTENT_REFS) return -ENOENT; return 0; } static bool skip_self_data_ref(u64 root, u64 ino, u64 offset, void *ctx) { const struct backref_ctx *bctx = ctx; if (ino == bctx->cur_objectid && root == bctx->backref_owner && offset == bctx->backref_offset) return true; return false; } /* * Given an inode, offset and extent item, it finds a good clone for a clone * instruction. Returns -ENOENT when none could be found. The function makes * sure that the returned clone is usable at the point where sending is at the * moment. This means, that no clones are accepted which lie behind the current * inode+offset. * * path must point to the extent item when called. */ static int find_extent_clone(struct send_ctx *sctx, struct btrfs_path *path, u64 ino, u64 data_offset, u64 ino_size, struct clone_root **found) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret; int extent_type; u64 logical; u64 disk_byte; u64 num_bytes; struct btrfs_file_extent_item *fi; struct extent_buffer *eb = path->nodes[0]; struct backref_ctx backref_ctx = { 0 }; struct btrfs_backref_walk_ctx backref_walk_ctx = { 0 }; struct clone_root *cur_clone_root; int compressed; u32 i; /* * With fallocate we can get prealloc extents beyond the inode's i_size, * so we don't do anything here because clone operations can not clone * to a range beyond i_size without increasing the i_size of the * destination inode. 
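 *
 * For example (editor's illustration): a file with an i_size of 100 KiB may
 * have a preallocated extent at file offset 1 MiB; since data_offset (1 MiB)
 * is not below ino_size (100 KiB), the check below returns 0 and no clone
 * source is searched for that extent.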
*/ if (data_offset >= ino_size) return 0; fi = btrfs_item_ptr(eb, path->slots[0], struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(eb, fi); if (extent_type == BTRFS_FILE_EXTENT_INLINE) return -ENOENT; disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); if (disk_byte == 0) return -ENOENT; compressed = btrfs_file_extent_compression(eb, fi); num_bytes = btrfs_file_extent_num_bytes(eb, fi); logical = disk_byte + btrfs_file_extent_offset(eb, fi); /* * Setup the clone roots. */ for (i = 0; i < sctx->clone_roots_cnt; i++) { cur_clone_root = sctx->clone_roots + i; cur_clone_root->ino = (u64)-1; cur_clone_root->offset = 0; cur_clone_root->num_bytes = 0; cur_clone_root->found_ref = false; } backref_ctx.sctx = sctx; backref_ctx.cur_objectid = ino; backref_ctx.cur_offset = data_offset; backref_ctx.bytenr = disk_byte; /* * Use the header owner and not the send root's id, because in case of a * snapshot we can have shared subtrees. */ backref_ctx.backref_owner = btrfs_header_owner(eb); backref_ctx.backref_offset = data_offset - btrfs_file_extent_offset(eb, fi); /* * The last extent of a file may be too large due to page alignment. * We need to adjust extent_len in this case so that the checks in * iterate_backrefs() work. */ if (data_offset + num_bytes >= ino_size) backref_ctx.extent_len = ino_size - data_offset; else backref_ctx.extent_len = num_bytes; /* * Now collect all backrefs. */ backref_walk_ctx.bytenr = disk_byte; if (compressed == BTRFS_COMPRESS_NONE) backref_walk_ctx.extent_item_pos = btrfs_file_extent_offset(eb, fi); backref_walk_ctx.fs_info = fs_info; backref_walk_ctx.cache_lookup = lookup_backref_cache; backref_walk_ctx.cache_store = store_backref_cache; backref_walk_ctx.indirect_ref_iterator = iterate_backrefs; backref_walk_ctx.check_extent_item = check_extent_item; backref_walk_ctx.user_ctx = &backref_ctx; /* * If have a single clone root, then it's the send root and we can tell * the backref walking code to skip our own backref and not resolve it, * since we can not use it for cloning - the source and destination * ranges can't overlap and in case the leaf is shared through a subtree * due to snapshots, we can't use those other roots since they are not * in the list of clone roots. */ if (sctx->clone_roots_cnt == 1) backref_walk_ctx.skip_data_ref = skip_self_data_ref; ret = iterate_extent_inodes(&backref_walk_ctx, true, iterate_backrefs, &backref_ctx); if (ret < 0) return ret; down_read(&fs_info->commit_root_sem); if (fs_info->last_reloc_trans > sctx->last_reloc_trans) { /* * A transaction commit for a transaction in which block group * relocation was done just happened. * The disk_bytenr of the file extent item we processed is * possibly stale, referring to the extent's location before * relocation. So act as if we haven't found any clone sources * and fallback to write commands, which will read the correct * data from the new extent location. Otherwise we will fail * below because we haven't found our own back reference or we * could be getting incorrect sources in case the old extent * was already reallocated after the relocation. 
*/ up_read(&fs_info->commit_root_sem); return -ENOENT; } up_read(&fs_info->commit_root_sem); btrfs_debug(fs_info, "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu", data_offset, ino, num_bytes, logical); if (!backref_ctx.found) { btrfs_debug(fs_info, "no clones found"); return -ENOENT; } cur_clone_root = NULL; for (i = 0; i < sctx->clone_roots_cnt; i++) { struct clone_root *clone_root = &sctx->clone_roots[i]; if (!clone_root->found_ref) continue; /* * Choose the root from which we can clone more bytes, to * minimize write operations and therefore have more extent * sharing at the destination (the same as in the source). */ if (!cur_clone_root || clone_root->num_bytes > cur_clone_root->num_bytes) { cur_clone_root = clone_root; /* * We found an optimal clone candidate (any inode from * any root is fine), so we're done. */ if (clone_root->num_bytes >= backref_ctx.extent_len) break; } } if (cur_clone_root) { *found = cur_clone_root; ret = 0; } else { ret = -ENOENT; } return ret; } static int read_symlink(struct btrfs_root *root, u64 ino, struct fs_path *dest) { int ret; struct btrfs_path *path; struct btrfs_key key; struct btrfs_file_extent_item *ei; u8 type; u8 compression; unsigned long off; int len; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret) { /* * An empty symlink inode. Can happen in rare error paths when * creating a symlink (transaction committed before the inode * eviction handler removed the symlink inode items and a crash * happened in between or the subvol was snapshoted in between). * Print an informative message to dmesg/syslog so that the user * can delete the symlink. */ btrfs_err(root->fs_info, "Found empty symlink inode %llu at root %llu", ino, root->root_key.objectid); ret = -EIO; goto out; } ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_file_extent_item); type = btrfs_file_extent_type(path->nodes[0], ei); if (unlikely(type != BTRFS_FILE_EXTENT_INLINE)) { ret = -EUCLEAN; btrfs_crit(root->fs_info, "send: found symlink extent that is not inline, ino %llu root %llu extent type %d", ino, btrfs_root_id(root), type); goto out; } compression = btrfs_file_extent_compression(path->nodes[0], ei); if (unlikely(compression != BTRFS_COMPRESS_NONE)) { ret = -EUCLEAN; btrfs_crit(root->fs_info, "send: found symlink extent with compression, ino %llu root %llu compression type %d", ino, btrfs_root_id(root), compression); goto out; } off = btrfs_file_extent_inline_start(ei); len = btrfs_file_extent_ram_bytes(path->nodes[0], ei); ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); out: btrfs_free_path(path); return ret; } /* * Helper function to generate a file name that is unique in the root of * send_root and parent_root. This is used to generate names for orphan inodes. 
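 *
 * The generated names have the form "o<ino>-<gen>-<idx>" (editor's note),
 * e.g. "o261-5-0" for inode 261 with generation 5; if that name already
 * exists in either root, idx is incremented and "o261-5-1" is tried next,
 * and so on until a unique name is found.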
*/ static int gen_unique_name(struct send_ctx *sctx, u64 ino, u64 gen, struct fs_path *dest) { int ret = 0; struct btrfs_path *path; struct btrfs_dir_item *di; char tmp[64]; int len; u64 idx = 0; path = alloc_path_for_send(); if (!path) return -ENOMEM; while (1) { struct fscrypt_str tmp_name; len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", ino, gen, idx); ASSERT(len < sizeof(tmp)); tmp_name.name = tmp; tmp_name.len = strlen(tmp); di = btrfs_lookup_dir_item(NULL, sctx->send_root, path, BTRFS_FIRST_FREE_OBJECTID, &tmp_name, 0); btrfs_release_path(path); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } if (di) { /* not unique, try again */ idx++; continue; } if (!sctx->parent_root) { /* unique */ ret = 0; break; } di = btrfs_lookup_dir_item(NULL, sctx->parent_root, path, BTRFS_FIRST_FREE_OBJECTID, &tmp_name, 0); btrfs_release_path(path); if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } if (di) { /* not unique, try again */ idx++; continue; } /* unique */ break; } ret = fs_path_add(dest, tmp, strlen(tmp)); out: btrfs_free_path(path); return ret; } enum inode_state { inode_state_no_change, inode_state_will_create, inode_state_did_create, inode_state_will_delete, inode_state_did_delete, }; static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen, u64 *send_gen, u64 *parent_gen) { int ret; int left_ret; int right_ret; u64 left_gen; u64 right_gen = 0; struct btrfs_inode_info info; ret = get_inode_info(sctx->send_root, ino, &info); if (ret < 0 && ret != -ENOENT) goto out; left_ret = (info.nlink == 0) ? -ENOENT : ret; left_gen = info.gen; if (send_gen) *send_gen = ((left_ret == -ENOENT) ? 0 : info.gen); if (!sctx->parent_root) { right_ret = -ENOENT; } else { ret = get_inode_info(sctx->parent_root, ino, &info); if (ret < 0 && ret != -ENOENT) goto out; right_ret = (info.nlink == 0) ? -ENOENT : ret; right_gen = info.gen; if (parent_gen) *parent_gen = ((right_ret == -ENOENT) ? 0 : info.gen); } if (!left_ret && !right_ret) { if (left_gen == gen && right_gen == gen) { ret = inode_state_no_change; } else if (left_gen == gen) { if (ino < sctx->send_progress) ret = inode_state_did_create; else ret = inode_state_will_create; } else if (right_gen == gen) { if (ino < sctx->send_progress) ret = inode_state_did_delete; else ret = inode_state_will_delete; } else { ret = -ENOENT; } } else if (!left_ret) { if (left_gen == gen) { if (ino < sctx->send_progress) ret = inode_state_did_create; else ret = inode_state_will_create; } else { ret = -ENOENT; } } else if (!right_ret) { if (right_gen == gen) { if (ino < sctx->send_progress) ret = inode_state_did_delete; else ret = inode_state_will_delete; } else { ret = -ENOENT; } } else { ret = -ENOENT; } out: return ret; } static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen, u64 *send_gen, u64 *parent_gen) { int ret; if (ino == BTRFS_FIRST_FREE_OBJECTID) return 1; ret = get_cur_inode_state(sctx, ino, gen, send_gen, parent_gen); if (ret < 0) goto out; if (ret == inode_state_no_change || ret == inode_state_did_create || ret == inode_state_will_delete) ret = 1; else ret = 0; out: return ret; } /* * Helper function to lookup a dir item in a dir. 
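 *
 * Given the directory's inode number and an entry name, the inode number the
 * matching dir item points to is returned in @found_inode. A dir item whose
 * key references a root item (a subvolume/snapshot entry) is treated as if
 * the entry did not exist and results in -ENOENT.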
*/ static int lookup_dir_item_inode(struct btrfs_root *root, u64 dir, const char *name, int name_len, u64 *found_inode) { int ret = 0; struct btrfs_dir_item *di; struct btrfs_key key; struct btrfs_path *path; struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len); path = alloc_path_for_send(); if (!path) return -ENOMEM; di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0); if (IS_ERR_OR_NULL(di)) { ret = di ? PTR_ERR(di) : -ENOENT; goto out; } btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); if (key.type == BTRFS_ROOT_ITEM_KEY) { ret = -ENOENT; goto out; } *found_inode = key.objectid; out: btrfs_free_path(path); return ret; } /* * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir, * generation of the parent dir and the name of the dir entry. */ static int get_first_ref(struct btrfs_root *root, u64 ino, u64 *dir, u64 *dir_gen, struct fs_path *name) { int ret; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_path *path; int len; u64 parent_dir; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = ino; key.type = BTRFS_INODE_REF_KEY; key.offset = 0; ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); if (ret < 0) goto out; if (!ret) btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); if (ret || found_key.objectid != ino || (found_key.type != BTRFS_INODE_REF_KEY && found_key.type != BTRFS_INODE_EXTREF_KEY)) { ret = -ENOENT; goto out; } if (found_key.type == BTRFS_INODE_REF_KEY) { struct btrfs_inode_ref *iref; iref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(path->nodes[0], iref); ret = fs_path_add_from_extent_buffer(name, path->nodes[0], (unsigned long)(iref + 1), len); parent_dir = found_key.offset; } else { struct btrfs_inode_extref *extref; extref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_extref); len = btrfs_inode_extref_name_len(path->nodes[0], extref); ret = fs_path_add_from_extent_buffer(name, path->nodes[0], (unsigned long)&extref->name, len); parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref); } if (ret < 0) goto out; btrfs_release_path(path); if (dir_gen) { ret = get_inode_gen(root, parent_dir, dir_gen); if (ret < 0) goto out; } *dir = parent_dir; out: btrfs_free_path(path); return ret; } static int is_first_ref(struct btrfs_root *root, u64 ino, u64 dir, const char *name, int name_len) { int ret; struct fs_path *tmp_name; u64 tmp_dir; tmp_name = fs_path_alloc(); if (!tmp_name) return -ENOMEM; ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name); if (ret < 0) goto out; if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { ret = 0; goto out; } ret = !memcmp(tmp_name->start, name, name_len); out: fs_path_free(tmp_name); return ret; } /* * Used by process_recorded_refs to determine if a new ref would overwrite an * already existing ref. In case it detects an overwrite, it returns the * inode/gen in who_ino/who_gen. * When an overwrite is detected, process_recorded_refs does proper orphanizing * to make sure later references to the overwritten inode are possible. * Orphanizing is however only required for the first ref of an inode. * process_recorded_refs does an additional is_first_ref check to see if * orphanizing is really required. 
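 *
 * Example (hypothetical inode numbers):
 *
 * Parent snapshot:
 * .            (ino 256)
 * |---- foo    (ino 266)
 *
 * Send snapshot:
 * .            (ino 256)
 * |---- foo    (ino 260)
 *
 * While the new ref "foo" of inode 260 is processed, inode 266 still owns the
 * name "foo" in the parent root and, because 266 is above sctx->send_progress,
 * it was not processed yet. So an overwrite is reported with who_ino == 266,
 * and process_recorded_refs either orphanizes 266 (if "foo" is its first ref)
 * or simply unlinks the conflicting entry.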
*/ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, const char *name, int name_len, u64 *who_ino, u64 *who_gen, u64 *who_mode) { int ret; u64 parent_root_dir_gen; u64 other_inode = 0; struct btrfs_inode_info info; if (!sctx->parent_root) return 0; ret = is_inode_existent(sctx, dir, dir_gen, NULL, &parent_root_dir_gen); if (ret <= 0) return 0; /* * If we have a parent root we need to verify that the parent dir was * not deleted and then re-created, if it was then we have no overwrite * and we can just unlink this entry. * * @parent_root_dir_gen was set to 0 if the inode does not exist in the * parent root. */ if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID && parent_root_dir_gen != dir_gen) return 0; ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, &other_inode); if (ret == -ENOENT) return 0; else if (ret < 0) return ret; /* * Check if the overwritten ref was already processed. If yes, the ref * was already unlinked/moved, so we can safely assume that we will not * overwrite anything at this point in time. */ if (other_inode > sctx->send_progress || is_waiting_for_move(sctx, other_inode)) { ret = get_inode_info(sctx->parent_root, other_inode, &info); if (ret < 0) return ret; *who_ino = other_inode; *who_gen = info.gen; *who_mode = info.mode; return 1; } return 0; } /* * Checks if the ref was overwritten by an already processed inode. This is * used by __get_cur_name_and_parent to find out if the ref was orphanized and * thus the orphan name needs be used. * process_recorded_refs also uses it to avoid unlinking of refs that were * overwritten. */ static int did_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, u64 ino, u64 ino_gen, const char *name, int name_len) { int ret; u64 ow_inode; u64 ow_gen = 0; u64 send_root_dir_gen; if (!sctx->parent_root) return 0; ret = is_inode_existent(sctx, dir, dir_gen, &send_root_dir_gen, NULL); if (ret <= 0) return ret; /* * @send_root_dir_gen was set to 0 if the inode does not exist in the * send root. */ if (dir != BTRFS_FIRST_FREE_OBJECTID && send_root_dir_gen != dir_gen) return 0; /* check if the ref was overwritten by another ref */ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, &ow_inode); if (ret == -ENOENT) { /* was never and will never be overwritten */ return 0; } else if (ret < 0) { return ret; } if (ow_inode == ino) { ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen); if (ret < 0) return ret; /* It's the same inode, so no overwrite happened. */ if (ow_gen == ino_gen) return 0; } /* * We know that it is or will be overwritten. Check this now. * The current inode being processed might have been the one that caused * inode 'ino' to be orphanized, therefore check if ow_inode matches * the current inode being processed. */ if (ow_inode < sctx->send_progress) return 1; if (ino != sctx->cur_ino && ow_inode == sctx->cur_ino) { if (ow_gen == 0) { ret = get_inode_gen(sctx->send_root, ow_inode, &ow_gen); if (ret < 0) return ret; } if (ow_gen == sctx->cur_inode_gen) return 1; } return 0; } /* * Same as did_overwrite_ref, but also checks if it is the first ref of an inode * that got overwritten. This is used by process_recorded_refs to determine * if it has to use the path as returned by get_cur_path or the orphan name. 
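 *
 * In other words: the first ref of (ino, gen) is looked up in the parent root
 * and passed to did_overwrite_ref(). A return value of 1 means paths for this
 * inode must be built from its orphan name (see gen_unique_name()) instead of
 * the old name.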
*/ static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) { int ret = 0; struct fs_path *name = NULL; u64 dir; u64 dir_gen; if (!sctx->parent_root) goto out; name = fs_path_alloc(); if (!name) return -ENOMEM; ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name); if (ret < 0) goto out; ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, name->start, fs_path_len(name)); out: fs_path_free(name); return ret; } static inline struct name_cache_entry *name_cache_search(struct send_ctx *sctx, u64 ino, u64 gen) { struct btrfs_lru_cache_entry *entry; entry = btrfs_lru_cache_lookup(&sctx->name_cache, ino, gen); if (!entry) return NULL; return container_of(entry, struct name_cache_entry, entry); } /* * Used by get_cur_path for each ref up to the root. * Returns 0 if it succeeded. * Returns 1 if the inode is not existent or got overwritten. In that case, the * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 * is returned, parent_ino/parent_gen are not guaranteed to be valid. * Returns <0 in case of error. */ static int __get_cur_name_and_parent(struct send_ctx *sctx, u64 ino, u64 gen, u64 *parent_ino, u64 *parent_gen, struct fs_path *dest) { int ret; int nce_ret; struct name_cache_entry *nce; /* * First check if we already did a call to this function with the same * ino/gen. If yes, check if the cache entry is still up-to-date. If yes * return the cached result. */ nce = name_cache_search(sctx, ino, gen); if (nce) { if (ino < sctx->send_progress && nce->need_later_update) { btrfs_lru_cache_remove(&sctx->name_cache, &nce->entry); nce = NULL; } else { *parent_ino = nce->parent_ino; *parent_gen = nce->parent_gen; ret = fs_path_add(dest, nce->name, nce->name_len); if (ret < 0) goto out; ret = nce->ret; goto out; } } /* * If the inode is not existent yet, add the orphan name and return 1. * This should only happen for the parent dir that we determine in * record_new_ref_if_needed(). */ ret = is_inode_existent(sctx, ino, gen, NULL, NULL); if (ret < 0) goto out; if (!ret) { ret = gen_unique_name(sctx, ino, gen, dest); if (ret < 0) goto out; ret = 1; goto out_cache; } /* * Depending on whether the inode was already processed or not, use * send_root or parent_root for ref lookup. */ if (ino < sctx->send_progress) ret = get_first_ref(sctx->send_root, ino, parent_ino, parent_gen, dest); else ret = get_first_ref(sctx->parent_root, ino, parent_ino, parent_gen, dest); if (ret < 0) goto out; /* * Check if the ref was overwritten by an inode's ref that was processed * earlier. If yes, treat as orphan and return 1. */ ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, dest->start, dest->end - dest->start); if (ret < 0) goto out; if (ret) { fs_path_reset(dest); ret = gen_unique_name(sctx, ino, gen, dest); if (ret < 0) goto out; ret = 1; } out_cache: /* * Store the result of the lookup in the name cache. */ nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL); if (!nce) { ret = -ENOMEM; goto out; } nce->entry.key = ino; nce->entry.gen = gen; nce->parent_ino = *parent_ino; nce->parent_gen = *parent_gen; nce->name_len = fs_path_len(dest); nce->ret = ret; strcpy(nce->name, dest->start); if (ino < sctx->send_progress) nce->need_later_update = 0; else nce->need_later_update = 1; nce_ret = btrfs_lru_cache_store(&sctx->name_cache, &nce->entry, GFP_KERNEL); if (nce_ret < 0) { kfree(nce); ret = nce_ret; } out: return ret; } /* * Magic happens here. 
This function returns the first ref to an inode as it * would look like while receiving the stream at this point in time. * We walk the path up to the root. For every inode in between, we check if it * was already processed/sent. If yes, we continue with the parent as found * in send_root. If not, we continue with the parent as found in parent_root. * If we encounter an inode that was deleted at this point in time, we use the * inodes "orphan" name instead of the real name and stop. Same with new inodes * that were not created yet and overwritten inodes/refs. * * When do we have orphan inodes: * 1. When an inode is freshly created and thus no valid refs are available yet * 2. When a directory lost all it's refs (deleted) but still has dir items * inside which were not processed yet (pending for move/delete). If anyone * tried to get the path to the dir items, it would get a path inside that * orphan directory. * 3. When an inode is moved around or gets new links, it may overwrite the ref * of an unprocessed inode. If in that case the first ref would be * overwritten, the overwritten inode gets "orphanized". Later when we * process this overwritten inode, it is restored at a new place by moving * the orphan inode. * * sctx->send_progress tells this function at which point in time receiving * would be. */ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, struct fs_path *dest) { int ret = 0; struct fs_path *name = NULL; u64 parent_inode = 0; u64 parent_gen = 0; int stop = 0; name = fs_path_alloc(); if (!name) { ret = -ENOMEM; goto out; } dest->reversed = 1; fs_path_reset(dest); while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { struct waiting_dir_move *wdm; fs_path_reset(name); if (is_waiting_for_rm(sctx, ino, gen)) { ret = gen_unique_name(sctx, ino, gen, name); if (ret < 0) goto out; ret = fs_path_add_path(dest, name); break; } wdm = get_waiting_dir_move(sctx, ino); if (wdm && wdm->orphanized) { ret = gen_unique_name(sctx, ino, gen, name); stop = 1; } else if (wdm) { ret = get_first_ref(sctx->parent_root, ino, &parent_inode, &parent_gen, name); } else { ret = __get_cur_name_and_parent(sctx, ino, gen, &parent_inode, &parent_gen, name); if (ret) stop = 1; } if (ret < 0) goto out; ret = fs_path_add_path(dest, name); if (ret < 0) goto out; ino = parent_inode; gen = parent_gen; } out: fs_path_free(name); if (!ret) fs_path_unreverse(dest); return ret; } /* * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace */ static int send_subvol_begin(struct send_ctx *sctx) { int ret; struct btrfs_root *send_root = sctx->send_root; struct btrfs_root *parent_root = sctx->parent_root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_root_ref *ref; struct extent_buffer *leaf; char *name = NULL; int namelen; path = btrfs_alloc_path(); if (!path) return -ENOMEM; name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); if (!name) { btrfs_free_path(path); return -ENOMEM; } key.objectid = send_root->root_key.objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = 0; ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, &key, path, 1, 0); if (ret < 0) goto out; if (ret) { ret = -ENOENT; goto out; } leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); if (key.type != BTRFS_ROOT_BACKREF_KEY || key.objectid != send_root->root_key.objectid) { ret = -ENOENT; goto out; } ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); namelen = btrfs_root_ref_name_len(leaf, ref); read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 
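	/*
	 * What follows emits the first command of the stream for this
	 * subvolume: BTRFS_SEND_C_SUBVOL for a full send or
	 * BTRFS_SEND_C_SNAPSHOT for an incremental send. Both carry the
	 * subvolume's name (read above from its root backref), a UUID (the
	 * received_uuid is preferred when set, so a previously received
	 * subvolume can be matched again on the receiving side) and the
	 * root's ctransid. For incremental sends the parent's UUID and
	 * ctransid are added as well so the receiver can locate the parent
	 * snapshot.
	 */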
btrfs_release_path(path); if (parent_root) { ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); if (ret < 0) goto out; } else { ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); if (ret < 0) goto out; } TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, sctx->send_root->root_item.received_uuid); else TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, sctx->send_root->root_item.uuid); TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, btrfs_root_ctransid(&sctx->send_root->root_item)); if (parent_root) { if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, parent_root->root_item.received_uuid); else TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, parent_root->root_item.uuid); TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, btrfs_root_ctransid(&sctx->parent_root->root_item)); } ret = send_cmd(sctx); tlv_put_failure: out: btrfs_free_path(path); kfree(name); return ret; } static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); if (ret < 0) goto out; ret = get_cur_path(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); if (ret < 0) goto out; ret = get_cur_path(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } static int send_fileattr(struct send_ctx *sctx, u64 ino, u64 gen, u64 fileattr) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; if (sctx->proto < 2) return 0; btrfs_debug(fs_info, "send_fileattr %llu fileattr=%llu", ino, fileattr); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_FILEATTR); if (ret < 0) goto out; ret = get_cur_path(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILEATTR, fileattr); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", ino, uid, gid); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); if (ret < 0) goto out; ret = get_cur_path(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p = NULL; 
struct btrfs_inode_item *ii; struct btrfs_path *path = NULL; struct extent_buffer *eb; struct btrfs_key key; int slot; btrfs_debug(fs_info, "send_utimes %llu", ino); p = fs_path_alloc(); if (!p) return -ENOMEM; path = alloc_path_for_send(); if (!path) { ret = -ENOMEM; goto out; } key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); if (ret > 0) ret = -ENOENT; if (ret < 0) goto out; eb = path->nodes[0]; slot = path->slots[0]; ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); if (ret < 0) goto out; ret = get_cur_path(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); if (sctx->proto >= 2) TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_OTIME, eb, &ii->otime); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); btrfs_free_path(path); return ret; } /* * If the cache is full, we can't remove entries from it and do a call to * send_utimes() for each respective inode, because we might be finishing * processing an inode that is a directory and it just got renamed, and existing * entries in the cache may refer to inodes that have the directory in their * full path - in which case we would generate outdated paths (pre-rename) * for the inodes that the cache entries point to. Instead of prunning the * cache when inserting, do it after we finish processing each inode at * finish_inode_if_needed(). */ static int cache_dir_utimes(struct send_ctx *sctx, u64 dir, u64 gen) { struct btrfs_lru_cache_entry *entry; int ret; entry = btrfs_lru_cache_lookup(&sctx->dir_utimes_cache, dir, gen); if (entry != NULL) return 0; /* Caching is optional, don't fail if we can't allocate memory. */ entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return send_utimes(sctx, dir, gen); entry->key = dir; entry->gen = gen; ret = btrfs_lru_cache_store(&sctx->dir_utimes_cache, entry, GFP_KERNEL); ASSERT(ret != -EEXIST); if (ret) { kfree(entry); return send_utimes(sctx, dir, gen); } return 0; } static int trim_dir_utimes_cache(struct send_ctx *sctx) { while (btrfs_lru_cache_size(&sctx->dir_utimes_cache) > SEND_MAX_DIR_UTIMES_CACHE_SIZE) { struct btrfs_lru_cache_entry *lru; int ret; lru = btrfs_lru_cache_lru_entry(&sctx->dir_utimes_cache); ASSERT(lru != NULL); ret = send_utimes(sctx, lru->key, lru->gen); if (ret) return ret; btrfs_lru_cache_remove(&sctx->dir_utimes_cache, lru); } return 0; } /* * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have * a valid path yet because we did not process the refs yet. So, the inode * is created as orphan. 
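 *
 * For example (hypothetical numbers), a regular file with inode number 261
 * and generation 5 results, roughly, in the stream command
 *
 *   mkfile o261-5-0
 *
 * i.e. a BTRFS_SEND_C_MKFILE command whose path is the unique orphan name
 * from gen_unique_name(). The real name(s) are attached later by
 * process_recorded_refs() via rename/link commands.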
*/ static int send_create_inode(struct send_ctx *sctx, u64 ino) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; int cmd; struct btrfs_inode_info info; u64 gen; u64 mode; u64 rdev; btrfs_debug(fs_info, "send_create_inode %llu", ino); p = fs_path_alloc(); if (!p) return -ENOMEM; if (ino != sctx->cur_ino) { ret = get_inode_info(sctx->send_root, ino, &info); if (ret < 0) goto out; gen = info.gen; mode = info.mode; rdev = info.rdev; } else { gen = sctx->cur_inode_gen; mode = sctx->cur_inode_mode; rdev = sctx->cur_inode_rdev; } if (S_ISREG(mode)) { cmd = BTRFS_SEND_C_MKFILE; } else if (S_ISDIR(mode)) { cmd = BTRFS_SEND_C_MKDIR; } else if (S_ISLNK(mode)) { cmd = BTRFS_SEND_C_SYMLINK; } else if (S_ISCHR(mode) || S_ISBLK(mode)) { cmd = BTRFS_SEND_C_MKNOD; } else if (S_ISFIFO(mode)) { cmd = BTRFS_SEND_C_MKFIFO; } else if (S_ISSOCK(mode)) { cmd = BTRFS_SEND_C_MKSOCK; } else { btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", (int)(mode & S_IFMT)); ret = -EOPNOTSUPP; goto out; } ret = begin_cmd(sctx, cmd); if (ret < 0) goto out; ret = gen_unique_name(sctx, ino, gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); if (S_ISLNK(mode)) { fs_path_reset(p); ret = read_symlink(sctx->send_root, ino, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); } ret = send_cmd(sctx); if (ret < 0) goto out; tlv_put_failure: out: fs_path_free(p); return ret; } static void cache_dir_created(struct send_ctx *sctx, u64 dir) { struct btrfs_lru_cache_entry *entry; int ret; /* Caching is optional, ignore any failures. */ entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return; entry->key = dir; entry->gen = 0; ret = btrfs_lru_cache_store(&sctx->dir_created_cache, entry, GFP_KERNEL); if (ret < 0) kfree(entry); } /* * We need some special handling for inodes that get processed before the parent * directory got created. See process_recorded_refs for details. * This function does the check if we already created the dir out of order. */ static int did_create_dir(struct send_ctx *sctx, u64 dir) { int ret = 0; int iter_ret = 0; struct btrfs_path *path = NULL; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_key di_key; struct btrfs_dir_item *di; if (btrfs_lru_cache_lookup(&sctx->dir_created_cache, dir, 0)) return 1; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = dir; key.type = BTRFS_DIR_INDEX_KEY; key.offset = 0; btrfs_for_each_slot(sctx->send_root, &key, &found_key, path, iter_ret) { struct extent_buffer *eb = path->nodes[0]; if (found_key.objectid != key.objectid || found_key.type != key.type) { ret = 0; break; } di = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(eb, di, &di_key); if (di_key.type != BTRFS_ROOT_ITEM_KEY && di_key.objectid < sctx->send_progress) { ret = 1; cache_dir_created(sctx, dir); break; } } /* Catch error found during iteration */ if (iter_ret < 0) ret = iter_ret; btrfs_free_path(path); return ret; } /* * Only creates the inode if it is: * 1. Not a directory * 2. Or a directory which was not created already due to out of order * directories. See did_create_dir and process_recorded_refs for details. 
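 *
 * Example of case 2 (hypothetical numbers): directory inode 265 contains a
 * file with inode number 257. While 257 was processed, the directory had to
 * be created out of order so that 257's path existed. When inode 265 itself
 * is processed here, did_create_dir() (or the dir_created_cache) reports
 * that, and a duplicate MKDIR command is avoided.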
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			return 0;
	}

	ret = send_create_inode(sctx, sctx->cur_ino);

	if (ret == 0 && S_ISDIR(sctx->cur_inode_mode))
		cache_dir_created(sctx, sctx->cur_ino);

	return ret;
}

struct recorded_ref {
	struct list_head list;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int name_len;
	struct rb_node node;
	struct rb_root *root;
};

static struct recorded_ref *recorded_ref_alloc(void)
{
	struct recorded_ref *ref;

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return NULL;
	RB_CLEAR_NODE(&ref->node);
	INIT_LIST_HEAD(&ref->list);
	return ref;
}

static void recorded_ref_free(struct recorded_ref *ref)
{
	if (!ref)
		return;
	if (!RB_EMPTY_NODE(&ref->node))
		rb_erase(&ref->node, ref->root);
	list_del(&ref->list);
	fs_path_free(ref->full_path);
	kfree(ref);
}

static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
{
	ref->full_path = path;
	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
}

static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = recorded_ref_alloc();
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		recorded_ref_free(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}

/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non empty
 * directories.
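 *
 * Sketch (hypothetical values): for inode 266 with generation 7 whose current
 * path is "foo", this amounts to:
 *
 *   gen_unique_name(sctx, 266, 7, orphan)   -> orphan becomes "o266-7-0"
 *   send_rename(sctx, path, orphan)         -> "foo" is renamed to "o266-7-0"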
*/ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, struct fs_path *path) { int ret; struct fs_path *orphan; orphan = fs_path_alloc(); if (!orphan) return -ENOMEM; ret = gen_unique_name(sctx, ino, gen, orphan); if (ret < 0) goto out; ret = send_rename(sctx, path, orphan); out: fs_path_free(orphan); return ret; } static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino, u64 dir_gen) { struct rb_node **p = &sctx->orphan_dirs.rb_node; struct rb_node *parent = NULL; struct orphan_dir_info *entry, *odi; while (*p) { parent = *p; entry = rb_entry(parent, struct orphan_dir_info, node); if (dir_ino < entry->ino) p = &(*p)->rb_left; else if (dir_ino > entry->ino) p = &(*p)->rb_right; else if (dir_gen < entry->gen) p = &(*p)->rb_left; else if (dir_gen > entry->gen) p = &(*p)->rb_right; else return entry; } odi = kmalloc(sizeof(*odi), GFP_KERNEL); if (!odi) return ERR_PTR(-ENOMEM); odi->ino = dir_ino; odi->gen = dir_gen; odi->last_dir_index_offset = 0; odi->dir_high_seq_ino = 0; rb_link_node(&odi->node, parent, p); rb_insert_color(&odi->node, &sctx->orphan_dirs); return odi; } static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino, u64 gen) { struct rb_node *n = sctx->orphan_dirs.rb_node; struct orphan_dir_info *entry; while (n) { entry = rb_entry(n, struct orphan_dir_info, node); if (dir_ino < entry->ino) n = n->rb_left; else if (dir_ino > entry->ino) n = n->rb_right; else if (gen < entry->gen) n = n->rb_left; else if (gen > entry->gen) n = n->rb_right; else return entry; } return NULL; } static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen) { struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen); return odi != NULL; } static void free_orphan_dir_info(struct send_ctx *sctx, struct orphan_dir_info *odi) { if (!odi) return; rb_erase(&odi->node, &sctx->orphan_dirs); kfree(odi); } /* * Returns 1 if a directory can be removed at this point in time. * We check this by iterating all dir items and checking if the inode behind * the dir item was already processed. */ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen) { int ret = 0; int iter_ret = 0; struct btrfs_root *root = sctx->parent_root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_key loc; struct btrfs_dir_item *di; struct orphan_dir_info *odi = NULL; u64 dir_high_seq_ino = 0; u64 last_dir_index_offset = 0; /* * Don't try to rmdir the top/root subvolume dir. */ if (dir == BTRFS_FIRST_FREE_OBJECTID) return 0; odi = get_orphan_dir_info(sctx, dir, dir_gen); if (odi && sctx->cur_ino < odi->dir_high_seq_ino) return 0; path = alloc_path_for_send(); if (!path) return -ENOMEM; if (!odi) { /* * Find the inode number associated with the last dir index * entry. This is very likely the inode with the highest number * of all inodes that have an entry in the directory. We can * then use it to avoid future calls to can_rmdir(), when * processing inodes with a lower number, from having to search * the parent root b+tree for dir index keys. */ key.objectid = dir; key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (ret > 0) { /* Can't happen, the root is never empty. 
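 * A lookup of (dir, BTRFS_DIR_INDEX_KEY, (u64)-1) that finds no exact match
 * is positioned at the slot where that key would be inserted, and the tree
 * always contains items that sort before it (at least those of the root
 * directory, BTRFS_FIRST_FREE_OBJECTID), so there is always a previous slot
 * to step back to.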
*/ ASSERT(path->slots[0] > 0); if (WARN_ON(path->slots[0] == 0)) { ret = -EUCLEAN; goto out; } path->slots[0]--; } btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != dir || key.type != BTRFS_DIR_INDEX_KEY) { /* No index keys, dir can be removed. */ ret = 1; goto out; } di = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); dir_high_seq_ino = loc.objectid; if (sctx->cur_ino < dir_high_seq_ino) { ret = 0; goto out; } btrfs_release_path(path); } key.objectid = dir; key.type = BTRFS_DIR_INDEX_KEY; key.offset = (odi ? odi->last_dir_index_offset : 0); btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { struct waiting_dir_move *dm; if (found_key.objectid != key.objectid || found_key.type != key.type) break; di = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_dir_item); btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); dir_high_seq_ino = max(dir_high_seq_ino, loc.objectid); last_dir_index_offset = found_key.offset; dm = get_waiting_dir_move(sctx, loc.objectid); if (dm) { dm->rmdir_ino = dir; dm->rmdir_gen = dir_gen; ret = 0; goto out; } if (loc.objectid > sctx->cur_ino) { ret = 0; goto out; } } if (iter_ret < 0) { ret = iter_ret; goto out; } free_orphan_dir_info(sctx, odi); ret = 1; out: btrfs_free_path(path); if (ret) return ret; if (!odi) { odi = add_orphan_dir_info(sctx, dir, dir_gen); if (IS_ERR(odi)) return PTR_ERR(odi); odi->gen = dir_gen; } odi->last_dir_index_offset = last_dir_index_offset; odi->dir_high_seq_ino = max(odi->dir_high_seq_ino, dir_high_seq_ino); return 0; } static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) { struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); return entry != NULL; } static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) { struct rb_node **p = &sctx->waiting_dir_moves.rb_node; struct rb_node *parent = NULL; struct waiting_dir_move *entry, *dm; dm = kmalloc(sizeof(*dm), GFP_KERNEL); if (!dm) return -ENOMEM; dm->ino = ino; dm->rmdir_ino = 0; dm->rmdir_gen = 0; dm->orphanized = orphanized; while (*p) { parent = *p; entry = rb_entry(parent, struct waiting_dir_move, node); if (ino < entry->ino) { p = &(*p)->rb_left; } else if (ino > entry->ino) { p = &(*p)->rb_right; } else { kfree(dm); return -EEXIST; } } rb_link_node(&dm->node, parent, p); rb_insert_color(&dm->node, &sctx->waiting_dir_moves); return 0; } static struct waiting_dir_move * get_waiting_dir_move(struct send_ctx *sctx, u64 ino) { struct rb_node *n = sctx->waiting_dir_moves.rb_node; struct waiting_dir_move *entry; while (n) { entry = rb_entry(n, struct waiting_dir_move, node); if (ino < entry->ino) n = n->rb_left; else if (ino > entry->ino) n = n->rb_right; else return entry; } return NULL; } static void free_waiting_dir_move(struct send_ctx *sctx, struct waiting_dir_move *dm) { if (!dm) return; rb_erase(&dm->node, &sctx->waiting_dir_moves); kfree(dm); } static int add_pending_dir_move(struct send_ctx *sctx, u64 ino, u64 ino_gen, u64 parent_ino, struct list_head *new_refs, struct list_head *deleted_refs, const bool is_orphan) { struct rb_node **p = &sctx->pending_dir_moves.rb_node; struct rb_node *parent = NULL; struct pending_dir_move *entry = NULL, *pm; struct recorded_ref *cur; int exists = 0; int ret; pm = kmalloc(sizeof(*pm), GFP_KERNEL); if (!pm) return -ENOMEM; pm->parent_ino = parent_ino; pm->ino = ino; pm->gen = ino_gen; INIT_LIST_HEAD(&pm->list); INIT_LIST_HEAD(&pm->update_refs); RB_CLEAR_NODE(&pm->node); while 
(*p) { parent = *p; entry = rb_entry(parent, struct pending_dir_move, node); if (parent_ino < entry->parent_ino) { p = &(*p)->rb_left; } else if (parent_ino > entry->parent_ino) { p = &(*p)->rb_right; } else { exists = 1; break; } } list_for_each_entry(cur, deleted_refs, list) { ret = dup_ref(cur, &pm->update_refs); if (ret < 0) goto out; } list_for_each_entry(cur, new_refs, list) { ret = dup_ref(cur, &pm->update_refs); if (ret < 0) goto out; } ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); if (ret) goto out; if (exists) { list_add_tail(&pm->list, &entry->list); } else { rb_link_node(&pm->node, parent, p); rb_insert_color(&pm->node, &sctx->pending_dir_moves); } ret = 0; out: if (ret) { __free_recorded_refs(&pm->update_refs); kfree(pm); } return ret; } static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, u64 parent_ino) { struct rb_node *n = sctx->pending_dir_moves.rb_node; struct pending_dir_move *entry; while (n) { entry = rb_entry(n, struct pending_dir_move, node); if (parent_ino < entry->parent_ino) n = n->rb_left; else if (parent_ino > entry->parent_ino) n = n->rb_right; else return entry; } return NULL; } static int path_loop(struct send_ctx *sctx, struct fs_path *name, u64 ino, u64 gen, u64 *ancestor_ino) { int ret = 0; u64 parent_inode = 0; u64 parent_gen = 0; u64 start_ino = ino; *ancestor_ino = 0; while (ino != BTRFS_FIRST_FREE_OBJECTID) { fs_path_reset(name); if (is_waiting_for_rm(sctx, ino, gen)) break; if (is_waiting_for_move(sctx, ino)) { if (*ancestor_ino == 0) *ancestor_ino = ino; ret = get_first_ref(sctx->parent_root, ino, &parent_inode, &parent_gen, name); } else { ret = __get_cur_name_and_parent(sctx, ino, gen, &parent_inode, &parent_gen, name); if (ret > 0) { ret = 0; break; } } if (ret < 0) break; if (parent_inode == start_ino) { ret = 1; if (*ancestor_ino == 0) *ancestor_ino = ino; break; } ino = parent_inode; gen = parent_gen; } return ret; } static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) { struct fs_path *from_path = NULL; struct fs_path *to_path = NULL; struct fs_path *name = NULL; u64 orig_progress = sctx->send_progress; struct recorded_ref *cur; u64 parent_ino, parent_gen; struct waiting_dir_move *dm = NULL; u64 rmdir_ino = 0; u64 rmdir_gen; u64 ancestor; bool is_orphan; int ret; name = fs_path_alloc(); from_path = fs_path_alloc(); if (!name || !from_path) { ret = -ENOMEM; goto out; } dm = get_waiting_dir_move(sctx, pm->ino); ASSERT(dm); rmdir_ino = dm->rmdir_ino; rmdir_gen = dm->rmdir_gen; is_orphan = dm->orphanized; free_waiting_dir_move(sctx, dm); if (is_orphan) { ret = gen_unique_name(sctx, pm->ino, pm->gen, from_path); } else { ret = get_first_ref(sctx->parent_root, pm->ino, &parent_ino, &parent_gen, name); if (ret < 0) goto out; ret = get_cur_path(sctx, parent_ino, parent_gen, from_path); if (ret < 0) goto out; ret = fs_path_add_path(from_path, name); } if (ret < 0) goto out; sctx->send_progress = sctx->cur_ino + 1; ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); if (ret < 0) goto out; if (ret) { LIST_HEAD(deleted_refs); ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, &pm->update_refs, &deleted_refs, is_orphan); if (ret < 0) goto out; if (rmdir_ino) { dm = get_waiting_dir_move(sctx, pm->ino); ASSERT(dm); dm->rmdir_ino = rmdir_ino; dm->rmdir_gen = rmdir_gen; } goto out; } fs_path_reset(name); to_path = name; name = NULL; ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); if (ret < 0) goto out; ret = send_rename(sctx, from_path, 
to_path); if (ret < 0) goto out; if (rmdir_ino) { struct orphan_dir_info *odi; u64 gen; odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen); if (!odi) { /* already deleted */ goto finish; } gen = odi->gen; ret = can_rmdir(sctx, rmdir_ino, gen); if (ret < 0) goto out; if (!ret) goto finish; name = fs_path_alloc(); if (!name) { ret = -ENOMEM; goto out; } ret = get_cur_path(sctx, rmdir_ino, gen, name); if (ret < 0) goto out; ret = send_rmdir(sctx, name); if (ret < 0) goto out; } finish: ret = cache_dir_utimes(sctx, pm->ino, pm->gen); if (ret < 0) goto out; /* * After rename/move, need to update the utimes of both new parent(s) * and old parent(s). */ list_for_each_entry(cur, &pm->update_refs, list) { /* * The parent inode might have been deleted in the send snapshot */ ret = get_inode_info(sctx->send_root, cur->dir, NULL); if (ret == -ENOENT) { ret = 0; continue; } if (ret < 0) goto out; ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen); if (ret < 0) goto out; } out: fs_path_free(name); fs_path_free(from_path); fs_path_free(to_path); sctx->send_progress = orig_progress; return ret; } static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) { if (!list_empty(&m->list)) list_del(&m->list); if (!RB_EMPTY_NODE(&m->node)) rb_erase(&m->node, &sctx->pending_dir_moves); __free_recorded_refs(&m->update_refs); kfree(m); } static void tail_append_pending_moves(struct send_ctx *sctx, struct pending_dir_move *moves, struct list_head *stack) { if (list_empty(&moves->list)) { list_add_tail(&moves->list, stack); } else { LIST_HEAD(list); list_splice_init(&moves->list, &list); list_add_tail(&moves->list, stack); list_splice_tail(&list, stack); } if (!RB_EMPTY_NODE(&moves->node)) { rb_erase(&moves->node, &sctx->pending_dir_moves); RB_CLEAR_NODE(&moves->node); } } static int apply_children_dir_moves(struct send_ctx *sctx) { struct pending_dir_move *pm; LIST_HEAD(stack); u64 parent_ino = sctx->cur_ino; int ret = 0; pm = get_pending_dir_moves(sctx, parent_ino); if (!pm) return 0; tail_append_pending_moves(sctx, pm, &stack); while (!list_empty(&stack)) { pm = list_first_entry(&stack, struct pending_dir_move, list); parent_ino = pm->ino; ret = apply_dir_move(sctx, pm); free_pending_move(sctx, pm); if (ret) goto out; pm = get_pending_dir_moves(sctx, parent_ino); if (pm) tail_append_pending_moves(sctx, pm, &stack); } return 0; out: while (!list_empty(&stack)) { pm = list_first_entry(&stack, struct pending_dir_move, list); free_pending_move(sctx, pm); } return ret; } /* * We might need to delay a directory rename even when no ancestor directory * (in the send root) with a higher inode number than ours (sctx->cur_ino) was * renamed. This happens when we rename a directory to the old name (the name * in the parent root) of some other unrelated directory that got its rename * delayed due to some ancestor with higher number that got renamed. * * Example: * * Parent snapshot: * . (ino 256) * |---- a/ (ino 257) * | |---- file (ino 260) * | * |---- b/ (ino 258) * |---- c/ (ino 259) * * Send snapshot: * . (ino 256) * |---- a/ (ino 258) * |---- x/ (ino 259) * |---- y/ (ino 257) * |----- file (ino 260) * * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 * from 'a' to 'x/y' happening first, which in turn depends on the rename of * inode 259 from 'c' to 'x'. 
So the order of rename commands the send stream * must issue is: * * 1 - rename 259 from 'c' to 'x' * 2 - rename 257 from 'a' to 'x/y' * 3 - rename 258 from 'b' to 'a' * * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can * be done right away and < 0 on error. */ static int wait_for_dest_dir_move(struct send_ctx *sctx, struct recorded_ref *parent_ref, const bool is_orphan) { struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key di_key; struct btrfs_dir_item *di; u64 left_gen; u64 right_gen; int ret = 0; struct waiting_dir_move *wdm; if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) return 0; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = parent_ref->dir; key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (ret > 0) { ret = 0; goto out; } di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, parent_ref->name_len); if (!di) { ret = 0; goto out; } /* * di_key.objectid has the number of the inode that has a dentry in the * parent directory with the same name that sctx->cur_ino is being * renamed to. We need to check if that inode is in the send root as * well and if it is currently marked as an inode with a pending rename, * if it is, we need to delay the rename of sctx->cur_ino as well, so * that it happens after that other inode is renamed. */ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); if (di_key.type != BTRFS_INODE_ITEM_KEY) { ret = 0; goto out; } ret = get_inode_gen(sctx->parent_root, di_key.objectid, &left_gen); if (ret < 0) goto out; ret = get_inode_gen(sctx->send_root, di_key.objectid, &right_gen); if (ret < 0) { if (ret == -ENOENT) ret = 0; goto out; } /* Different inode, no need to delay the rename of sctx->cur_ino */ if (right_gen != left_gen) { ret = 0; goto out; } wdm = get_waiting_dir_move(sctx, di_key.objectid); if (wdm && !wdm->orphanized) { ret = add_pending_dir_move(sctx, sctx->cur_ino, sctx->cur_inode_gen, di_key.objectid, &sctx->new_refs, &sctx->deleted_refs, is_orphan); if (!ret) ret = 1; } out: btrfs_free_path(path); return ret; } /* * Check if inode ino2, or any of its ancestors, is inode ino1. * Return 1 if true, 0 if false and < 0 on error. */ static int check_ino_in_path(struct btrfs_root *root, const u64 ino1, const u64 ino1_gen, const u64 ino2, const u64 ino2_gen, struct fs_path *fs_path) { u64 ino = ino2; if (ino1 == ino2) return ino1_gen == ino2_gen; while (ino > BTRFS_FIRST_FREE_OBJECTID) { u64 parent; u64 parent_gen; int ret; fs_path_reset(fs_path); ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); if (ret < 0) return ret; if (parent == ino1) return parent_gen == ino1_gen; ino = parent; } return 0; } /* * Check if inode ino1 is an ancestor of inode ino2 in the given root for any * possible path (in case ino2 is not a directory and has multiple hard links). * Return 1 if true, 0 if false and < 0 on error. 
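 *
 * Example (hypothetical): if ino2 is a regular file with hard links at "a/x"
 * and "b/x", every BTRFS_INODE_REF_KEY/BTRFS_INODE_EXTREF_KEY item of ino2 is
 * visited and each parent chain is walked with check_ino_in_path(), so the
 * inode of directory "a" counts as an ancestor even though the link at "b/x"
 * does not pass through it.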
*/ static int is_ancestor(struct btrfs_root *root, const u64 ino1, const u64 ino1_gen, const u64 ino2, struct fs_path *fs_path) { bool free_fs_path = false; int ret = 0; int iter_ret = 0; struct btrfs_path *path = NULL; struct btrfs_key key; if (!fs_path) { fs_path = fs_path_alloc(); if (!fs_path) return -ENOMEM; free_fs_path = true; } path = alloc_path_for_send(); if (!path) { ret = -ENOMEM; goto out; } key.objectid = ino2; key.type = BTRFS_INODE_REF_KEY; key.offset = 0; btrfs_for_each_slot(root, &key, &key, path, iter_ret) { struct extent_buffer *leaf = path->nodes[0]; int slot = path->slots[0]; u32 cur_offset = 0; u32 item_size; if (key.objectid != ino2) break; if (key.type != BTRFS_INODE_REF_KEY && key.type != BTRFS_INODE_EXTREF_KEY) break; item_size = btrfs_item_size(leaf, slot); while (cur_offset < item_size) { u64 parent; u64 parent_gen; if (key.type == BTRFS_INODE_EXTREF_KEY) { unsigned long ptr; struct btrfs_inode_extref *extref; ptr = btrfs_item_ptr_offset(leaf, slot); extref = (struct btrfs_inode_extref *) (ptr + cur_offset); parent = btrfs_inode_extref_parent(leaf, extref); cur_offset += sizeof(*extref); cur_offset += btrfs_inode_extref_name_len(leaf, extref); } else { parent = key.offset; cur_offset = item_size; } ret = get_inode_gen(root, parent, &parent_gen); if (ret < 0) goto out; ret = check_ino_in_path(root, ino1, ino1_gen, parent, parent_gen, fs_path); if (ret) goto out; } } ret = 0; if (iter_ret < 0) ret = iter_ret; out: btrfs_free_path(path); if (free_fs_path) fs_path_free(fs_path); return ret; } static int wait_for_parent_move(struct send_ctx *sctx, struct recorded_ref *parent_ref, const bool is_orphan) { int ret = 0; u64 ino = parent_ref->dir; u64 ino_gen = parent_ref->dir_gen; u64 parent_ino_before, parent_ino_after; struct fs_path *path_before = NULL; struct fs_path *path_after = NULL; int len1, len2; path_after = fs_path_alloc(); path_before = fs_path_alloc(); if (!path_after || !path_before) { ret = -ENOMEM; goto out; } /* * Our current directory inode may not yet be renamed/moved because some * ancestor (immediate or not) has to be renamed/moved first. So find if * such ancestor exists and make sure our own rename/move happens after * that ancestor is processed to avoid path build infinite loops (done * at get_cur_path()). */ while (ino > BTRFS_FIRST_FREE_OBJECTID) { u64 parent_ino_after_gen; if (is_waiting_for_move(sctx, ino)) { /* * If the current inode is an ancestor of ino in the * parent root, we need to delay the rename of the * current inode, otherwise don't delayed the rename * because we can end up with a circular dependency * of renames, resulting in some directories never * getting the respective rename operations issued in * the send stream or getting into infinite path build * loops. 
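 *
 * When that is the case, the loop is exited with ret == 1 and, at the end of
 * this function, add_pending_dir_move() queues the rename of sctx->cur_ino;
 * it is applied later through apply_children_dir_moves(), once the inode it
 * depends on has been processed.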
*/ ret = is_ancestor(sctx->parent_root, sctx->cur_ino, sctx->cur_inode_gen, ino, path_before); if (ret) break; } fs_path_reset(path_before); fs_path_reset(path_after); ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, &parent_ino_after_gen, path_after); if (ret < 0) goto out; ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, NULL, path_before); if (ret < 0 && ret != -ENOENT) { goto out; } else if (ret == -ENOENT) { ret = 0; break; } len1 = fs_path_len(path_before); len2 = fs_path_len(path_after); if (ino > sctx->cur_ino && (parent_ino_before != parent_ino_after || len1 != len2 || memcmp(path_before->start, path_after->start, len1))) { u64 parent_ino_gen; ret = get_inode_gen(sctx->parent_root, ino, &parent_ino_gen); if (ret < 0) goto out; if (ino_gen == parent_ino_gen) { ret = 1; break; } } ino = parent_ino_after; ino_gen = parent_ino_after_gen; } out: fs_path_free(path_before); fs_path_free(path_after); if (ret == 1) { ret = add_pending_dir_move(sctx, sctx->cur_ino, sctx->cur_inode_gen, ino, &sctx->new_refs, &sctx->deleted_refs, is_orphan); if (!ret) ret = 1; } return ret; } static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) { int ret; struct fs_path *new_path; /* * Our reference's name member points to its full_path member string, so * we use here a new path. */ new_path = fs_path_alloc(); if (!new_path) return -ENOMEM; ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path); if (ret < 0) { fs_path_free(new_path); return ret; } ret = fs_path_add(new_path, ref->name, ref->name_len); if (ret < 0) { fs_path_free(new_path); return ret; } fs_path_free(ref->full_path); set_ref_path(ref, new_path); return 0; } /* * When processing the new references for an inode we may orphanize an existing * directory inode because its old name conflicts with one of the new references * of the current inode. Later, when processing another new reference of our * inode, we might need to orphanize another inode, but the path we have in the * reference reflects the pre-orphanization name of the directory we previously * orphanized. For example: * * parent snapshot looks like: * * . (ino 256) * |----- f1 (ino 257) * |----- f2 (ino 258) * |----- d1/ (ino 259) * |----- d2/ (ino 260) * * send snapshot looks like: * * . (ino 256) * |----- d1 (ino 258) * |----- f2/ (ino 259) * |----- f2_link/ (ino 260) * | |----- f1 (ino 257) * | * |----- d2 (ino 258) * * When processing inode 257 we compute the name for inode 259 as "d1", and we * cache it in the name cache. Later when we start processing inode 258, when * collecting all its new references we set a full path of "d1/d2" for its new * reference with name "d2". When we start processing the new references we * start by processing the new reference with name "d1", and this results in * orphanizing inode 259, since its old reference causes a conflict. Then we * move on the next new reference, with name "d2", and we find out we must * orphanize inode 260, as its old reference conflicts with ours - but for the * orphanization we use a source path corresponding to the path we stored in the * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the * receiver fail since the path component "d1/" no longer exists, it was renamed * to "o259-6-0/" when processing the previous new reference. So in this case we * must recompute the path in the new reference and use it for the new * orphanization operation. 
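 *
 * Concretely, refresh_ref_path() below rebuilds ref->full_path by running
 * get_cur_path() for ref->dir again and re-appending the entry name, which in
 * the example above turns "d1/d2" into "o259-6-0/d2".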
*/ static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) { char *name; int ret; name = kmemdup(ref->name, ref->name_len, GFP_KERNEL); if (!name) return -ENOMEM; fs_path_reset(ref->full_path); ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path); if (ret < 0) goto out; ret = fs_path_add(ref->full_path, name, ref->name_len); if (ret < 0) goto out; /* Update the reference's base name pointer. */ set_ref_path(ref, ref->full_path); out: kfree(name); return ret; } /* * This does all the move/link/unlink/rmdir magic. */ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct recorded_ref *cur; struct recorded_ref *cur2; LIST_HEAD(check_dirs); struct fs_path *valid_path = NULL; u64 ow_inode = 0; u64 ow_gen; u64 ow_mode; int did_overwrite = 0; int is_orphan = 0; u64 last_dir_ino_rm = 0; bool can_rename = true; bool orphanized_dir = false; bool orphanized_ancestor = false; btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); /* * This should never happen as the root dir always has the same ref * which is always '..' */ BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); valid_path = fs_path_alloc(); if (!valid_path) { ret = -ENOMEM; goto out; } /* * First, check if the first ref of the current inode was overwritten * before. If yes, we know that the current inode was already orphanized * and thus use the orphan name. If not, we can use get_cur_path to * get the path of the first ref as it would like while receiving at * this point in time. * New inodes are always orphan at the beginning, so force to use the * orphan name in this case. * The first ref is stored in valid_path and will be updated if it * gets moved around. */ if (!sctx->cur_inode_new) { ret = did_overwrite_first_ref(sctx, sctx->cur_ino, sctx->cur_inode_gen); if (ret < 0) goto out; if (ret) did_overwrite = 1; } if (sctx->cur_inode_new || did_overwrite) { ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); if (ret < 0) goto out; is_orphan = 1; } else { ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); if (ret < 0) goto out; } /* * Before doing any rename and link operations, do a first pass on the * new references to orphanize any unprocessed inodes that may have a * reference that conflicts with one of the new references of the current * inode. This needs to happen first because a new reference may conflict * with the old reference of a parent directory, so we must make sure * that the path used for link and rename commands don't use an * orphanized name when an ancestor was not yet orphanized. * * Example: * * Parent snapshot: * * . (ino 256) * |----- testdir/ (ino 259) * | |----- a (ino 257) * | * |----- b (ino 258) * * Send snapshot: * * . (ino 256) * |----- testdir_2/ (ino 259) * | |----- a (ino 260) * | * |----- testdir (ino 257) * |----- b (ino 257) * |----- b2 (ino 258) * * Processing the new reference for inode 257 with name "b" may happen * before processing the new reference with name "testdir". If so, we * must make sure that by the time we send a link command to create the * hard link "b", inode 259 was already orphanized, since the generated * path in "valid_path" already contains the orphanized name for 259. * We are processing inode 257, so only later when processing 259 we do * the rename operation to change its temporary (orphanized) name to * "testdir_2". 
*/ list_for_each_entry(cur, &sctx->new_refs, list) { ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); if (ret < 0) goto out; if (ret == inode_state_will_create) continue; /* * Check if this new ref would overwrite the first ref of another * unprocessed inode. If yes, orphanize the overwritten inode. * If we find an overwritten ref that is not the first ref, * simply unlink it. */ ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, cur->name, cur->name_len, &ow_inode, &ow_gen, &ow_mode); if (ret < 0) goto out; if (ret) { ret = is_first_ref(sctx->parent_root, ow_inode, cur->dir, cur->name, cur->name_len); if (ret < 0) goto out; if (ret) { struct name_cache_entry *nce; struct waiting_dir_move *wdm; if (orphanized_dir) { ret = refresh_ref_path(sctx, cur); if (ret < 0) goto out; } ret = orphanize_inode(sctx, ow_inode, ow_gen, cur->full_path); if (ret < 0) goto out; if (S_ISDIR(ow_mode)) orphanized_dir = true; /* * If ow_inode has its rename operation delayed * make sure that its orphanized name is used in * the source path when performing its rename * operation. */ wdm = get_waiting_dir_move(sctx, ow_inode); if (wdm) wdm->orphanized = true; /* * Make sure we clear our orphanized inode's * name from the name cache. This is because the * inode ow_inode might be an ancestor of some * other inode that will be orphanized as well * later and has an inode number greater than * sctx->send_progress. We need to prevent * future name lookups from using the old name * and get instead the orphan name. */ nce = name_cache_search(sctx, ow_inode, ow_gen); if (nce) btrfs_lru_cache_remove(&sctx->name_cache, &nce->entry); /* * ow_inode might currently be an ancestor of * cur_ino, therefore compute valid_path (the * current path of cur_ino) again because it * might contain the pre-orphanization name of * ow_inode, which is no longer valid. */ ret = is_ancestor(sctx->parent_root, ow_inode, ow_gen, sctx->cur_ino, NULL); if (ret > 0) { orphanized_ancestor = true; fs_path_reset(valid_path); ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); } if (ret < 0) goto out; } else { /* * If we previously orphanized a directory that * collided with a new reference that we already * processed, recompute the current path because * that directory may be part of the path. */ if (orphanized_dir) { ret = refresh_ref_path(sctx, cur); if (ret < 0) goto out; } ret = send_unlink(sctx, cur->full_path); if (ret < 0) goto out; } } } list_for_each_entry(cur, &sctx->new_refs, list) { /* * We may have refs where the parent directory does not exist * yet. This happens if the parent directories inum is higher * than the current inum. To handle this case, we create the * parent directory out of order. But we need to check if this * did already happen before due to other refs in the same dir. */ ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); if (ret < 0) goto out; if (ret == inode_state_will_create) { ret = 0; /* * First check if any of the current inodes refs did * already create the dir. */ list_for_each_entry(cur2, &sctx->new_refs, list) { if (cur == cur2) break; if (cur2->dir == cur->dir) { ret = 1; break; } } /* * If that did not happen, check if a previous inode * did already create the dir. 
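 * For instance (hypothetical numbers), the current inode 261 may have two new
 * refs "d/x" and "d/y" whose parent directory d has inode number 265 and was
 * therefore not processed yet: the first ref triggers send_create_inode(265)
 * below followed by cache_dir_created(), while the second ref notices in the
 * loop above that an earlier ref shares the same cur->dir. Refs of previously
 * processed inodes inside d are caught by did_create_dir() instead.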
*/ if (!ret) ret = did_create_dir(sctx, cur->dir); if (ret < 0) goto out; if (!ret) { ret = send_create_inode(sctx, cur->dir); if (ret < 0) goto out; cache_dir_created(sctx, cur->dir); } } if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { ret = wait_for_dest_dir_move(sctx, cur, is_orphan); if (ret < 0) goto out; if (ret == 1) { can_rename = false; *pending_move = 1; } } if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && can_rename) { ret = wait_for_parent_move(sctx, cur, is_orphan); if (ret < 0) goto out; if (ret == 1) { can_rename = false; *pending_move = 1; } } /* * link/move the ref to the new place. If we have an orphan * inode, move it and update valid_path. If not, link or move * it depending on the inode mode. */ if (is_orphan && can_rename) { ret = send_rename(sctx, valid_path, cur->full_path); if (ret < 0) goto out; is_orphan = 0; ret = fs_path_copy(valid_path, cur->full_path); if (ret < 0) goto out; } else if (can_rename) { if (S_ISDIR(sctx->cur_inode_mode)) { /* * Dirs can't be linked, so move it. For moved * dirs, we always have one new and one deleted * ref. The deleted ref is ignored later. */ ret = send_rename(sctx, valid_path, cur->full_path); if (!ret) ret = fs_path_copy(valid_path, cur->full_path); if (ret < 0) goto out; } else { /* * We might have previously orphanized an inode * which is an ancestor of our current inode, * so our reference's full path, which was * computed before any such orphanizations, must * be updated. */ if (orphanized_dir) { ret = update_ref_path(sctx, cur); if (ret < 0) goto out; } ret = send_link(sctx, cur->full_path, valid_path); if (ret < 0) goto out; } } ret = dup_ref(cur, &check_dirs); if (ret < 0) goto out; } if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { /* * Check if we can already rmdir the directory. If not, * orphanize it. For every dir item inside that gets deleted * later, we do this check again and rmdir it then if possible. * See the use of check_dirs for more details. */ ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen); if (ret < 0) goto out; if (ret) { ret = send_rmdir(sctx, valid_path); if (ret < 0) goto out; } else if (!is_orphan) { ret = orphanize_inode(sctx, sctx->cur_ino, sctx->cur_inode_gen, valid_path); if (ret < 0) goto out; is_orphan = 1; } list_for_each_entry(cur, &sctx->deleted_refs, list) { ret = dup_ref(cur, &check_dirs); if (ret < 0) goto out; } } else if (S_ISDIR(sctx->cur_inode_mode) && !list_empty(&sctx->deleted_refs)) { /* * We have a moved dir. Add the old parent to check_dirs */ cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, list); ret = dup_ref(cur, &check_dirs); if (ret < 0) goto out; } else if (!S_ISDIR(sctx->cur_inode_mode)) { /* * We have a non dir inode. Go through all deleted refs and * unlink them if they were not already overwritten by other * inodes. */ list_for_each_entry(cur, &sctx->deleted_refs, list) { ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, sctx->cur_ino, sctx->cur_inode_gen, cur->name, cur->name_len); if (ret < 0) goto out; if (!ret) { /* * If we orphanized any ancestor before, we need * to recompute the full path for deleted names, * since any such path was computed before we * processed any references and orphanized any * ancestor inode. */ if (orphanized_ancestor) { ret = update_ref_path(sctx, cur); if (ret < 0) goto out; } ret = send_unlink(sctx, cur->full_path); if (ret < 0) goto out; } ret = dup_ref(cur, &check_dirs); if (ret < 0) goto out; } /* * If the inode is still orphan, unlink the orphan. 
This may * happen when a previous inode did overwrite the first ref * of this inode and no new refs were added for the current * inode. Unlinking does not mean that the inode is deleted in * all cases. There may still be links to this inode in other * places. */ if (is_orphan) { ret = send_unlink(sctx, valid_path); if (ret < 0) goto out; } } /* * We did collect all parent dirs where cur_inode was once located. We * now go through all these dirs and check if they are pending for * deletion and if it's finally possible to perform the rmdir now. * We also update the inode stats of the parent dirs here. */ list_for_each_entry(cur, &check_dirs, list) { /* * In case we had refs into dirs that were not processed yet, * we don't need to do the utime and rmdir logic for these dirs. * The dir will be processed later. */ if (cur->dir > sctx->cur_ino) continue; ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen, NULL, NULL); if (ret < 0) goto out; if (ret == inode_state_did_create || ret == inode_state_no_change) { ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen); if (ret < 0) goto out; } else if (ret == inode_state_did_delete && cur->dir != last_dir_ino_rm) { ret = can_rmdir(sctx, cur->dir, cur->dir_gen); if (ret < 0) goto out; if (ret) { ret = get_cur_path(sctx, cur->dir, cur->dir_gen, valid_path); if (ret < 0) goto out; ret = send_rmdir(sctx, valid_path); if (ret < 0) goto out; last_dir_ino_rm = cur->dir; } } } ret = 0; out: __free_recorded_refs(&check_dirs); free_recorded_refs(sctx); fs_path_free(valid_path); return ret; } static int rbtree_ref_comp(const void *k, const struct rb_node *node) { const struct recorded_ref *data = k; const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node); int result; if (data->dir > ref->dir) return 1; if (data->dir < ref->dir) return -1; if (data->dir_gen > ref->dir_gen) return 1; if (data->dir_gen < ref->dir_gen) return -1; if (data->name_len > ref->name_len) return 1; if (data->name_len < ref->name_len) return -1; result = strcmp(data->name, ref->name); if (result > 0) return 1; if (result < 0) return -1; return 0; } static bool rbtree_ref_less(struct rb_node *node, const struct rb_node *parent) { const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node); return rbtree_ref_comp(entry, parent) < 0; } static int record_ref_in_tree(struct rb_root *root, struct list_head *refs, struct fs_path *name, u64 dir, u64 dir_gen, struct send_ctx *sctx) { int ret = 0; struct fs_path *path = NULL; struct recorded_ref *ref = NULL; path = fs_path_alloc(); if (!path) { ret = -ENOMEM; goto out; } ref = recorded_ref_alloc(); if (!ref) { ret = -ENOMEM; goto out; } ret = get_cur_path(sctx, dir, dir_gen, path); if (ret < 0) goto out; ret = fs_path_add_path(path, name); if (ret < 0) goto out; ref->dir = dir; ref->dir_gen = dir_gen; set_ref_path(ref, path); list_add_tail(&ref->list, refs); rb_add(&ref->node, root, rbtree_ref_less); ref->root = root; out: if (ret) { if (path && (!ref || !ref->full_path)) fs_path_free(path); recorded_ref_free(ref); } return ret; } static int record_new_ref_if_needed(int num, u64 dir, int index, struct fs_path *name, void *ctx) { int ret = 0; struct send_ctx *sctx = ctx; struct rb_node *node = NULL; struct recorded_ref data; struct recorded_ref *ref; u64 dir_gen; ret = get_inode_gen(sctx->send_root, dir, &dir_gen); if (ret < 0) goto out; data.dir = dir; data.dir_gen = dir_gen; set_ref_path(&data, name); node = rb_find(&data, &sctx->rbtree_deleted_refs, rbtree_ref_comp); if (node) { ref = rb_entry(node, struct 
recorded_ref, node); recorded_ref_free(ref); } else { ret = record_ref_in_tree(&sctx->rbtree_new_refs, &sctx->new_refs, name, dir, dir_gen, sctx); } out: return ret; } static int record_deleted_ref_if_needed(int num, u64 dir, int index, struct fs_path *name, void *ctx) { int ret = 0; struct send_ctx *sctx = ctx; struct rb_node *node = NULL; struct recorded_ref data; struct recorded_ref *ref; u64 dir_gen; ret = get_inode_gen(sctx->parent_root, dir, &dir_gen); if (ret < 0) goto out; data.dir = dir; data.dir_gen = dir_gen; set_ref_path(&data, name); node = rb_find(&data, &sctx->rbtree_new_refs, rbtree_ref_comp); if (node) { ref = rb_entry(node, struct recorded_ref, node); recorded_ref_free(ref); } else { ret = record_ref_in_tree(&sctx->rbtree_deleted_refs, &sctx->deleted_refs, name, dir, dir_gen, sctx); } out: return ret; } static int record_new_ref(struct send_ctx *sctx) { int ret; ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key, 0, record_new_ref_if_needed, sctx); if (ret < 0) goto out; ret = 0; out: return ret; } static int record_deleted_ref(struct send_ctx *sctx) { int ret; ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx); if (ret < 0) goto out; ret = 0; out: return ret; } static int record_changed_ref(struct send_ctx *sctx) { int ret = 0; ret = iterate_inode_ref(sctx->send_root, sctx->left_path, sctx->cmp_key, 0, record_new_ref_if_needed, sctx); if (ret < 0) goto out; ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, sctx->cmp_key, 0, record_deleted_ref_if_needed, sctx); if (ret < 0) goto out; ret = 0; out: return ret; } /* * Record and process all refs at once. Needed when an inode changes the * generation number, which means that it was deleted and recreated. */ static int process_all_refs(struct send_ctx *sctx, enum btrfs_compare_tree_result cmd) { int ret = 0; int iter_ret = 0; struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; iterate_inode_ref_t cb; int pending_move = 0; path = alloc_path_for_send(); if (!path) return -ENOMEM; if (cmd == BTRFS_COMPARE_TREE_NEW) { root = sctx->send_root; cb = record_new_ref_if_needed; } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { root = sctx->parent_root; cb = record_deleted_ref_if_needed; } else { btrfs_err(sctx->send_root->fs_info, "Wrong command %d in process_all_refs", cmd); ret = -EINVAL; goto out; } key.objectid = sctx->cmp_key->objectid; key.type = BTRFS_INODE_REF_KEY; key.offset = 0; btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { if (found_key.objectid != key.objectid || (found_key.type != BTRFS_INODE_REF_KEY && found_key.type != BTRFS_INODE_EXTREF_KEY)) break; ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); if (ret < 0) goto out; } /* Catch error found during iteration */ if (iter_ret < 0) { ret = iter_ret; goto out; } btrfs_release_path(path); /* * We don't actually care about pending_move as we are simply * re-creating this inode and will be rename'ing it into place once we * rename the parent directory. 
*/ ret = process_recorded_refs(sctx, &pending_move); out: btrfs_free_path(path); return ret; } static int send_set_xattr(struct send_ctx *sctx, struct fs_path *path, const char *name, int name_len, const char *data, int data_len) { int ret = 0; ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } static int send_remove_xattr(struct send_ctx *sctx, struct fs_path *path, const char *name, int name_len) { int ret = 0; ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } static int __process_new_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { int ret; struct send_ctx *sctx = ctx; struct fs_path *p; struct posix_acl_xattr_header dummy_acl; /* Capabilities are emitted by finish_inode_if_needed */ if (!strncmp(name, XATTR_NAME_CAPS, name_len)) return 0; p = fs_path_alloc(); if (!p) return -ENOMEM; /* * This hack is needed because empty acls are stored as zero byte * data in xattrs. Problem with that is, that receiving these zero byte * acls will fail later. To fix this, we send a dummy acl list that * only contains the version number and no entries. */ if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { if (data_len == 0) { dummy_acl.a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION); data = (char *)&dummy_acl; data_len = sizeof(dummy_acl); } } ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto out; ret = send_set_xattr(sctx, p, name, name_len, data, data_len); out: fs_path_free(p); return ret; } static int __process_deleted_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { int ret; struct send_ctx *sctx = ctx; struct fs_path *p; p = fs_path_alloc(); if (!p) return -ENOMEM; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto out; ret = send_remove_xattr(sctx, p, name, name_len); out: fs_path_free(p); return ret; } static int process_new_xattr(struct send_ctx *sctx) { int ret = 0; ret = iterate_dir_item(sctx->send_root, sctx->left_path, __process_new_xattr, sctx); return ret; } static int process_deleted_xattr(struct send_ctx *sctx) { return iterate_dir_item(sctx->parent_root, sctx->right_path, __process_deleted_xattr, sctx); } struct find_xattr_ctx { const char *name; int name_len; int found_idx; char *found_data; int found_data_len; }; static int __find_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *vctx) { struct find_xattr_ctx *ctx = vctx; if (name_len == ctx->name_len && strncmp(name, ctx->name, name_len) == 0) { ctx->found_idx = num; ctx->found_data_len = data_len; ctx->found_data = kmemdup(data, data_len, GFP_KERNEL); if (!ctx->found_data) return -ENOMEM; return 1; } return 0; } static int find_xattr(struct btrfs_root *root, struct btrfs_path *path, struct btrfs_key *key, const char *name, int name_len, char **data, int *data_len) { int ret; struct find_xattr_ctx ctx; ctx.name = name; ctx.name_len = name_len; ctx.found_idx = -1; ctx.found_data = 
NULL; ctx.found_data_len = 0; ret = iterate_dir_item(root, path, __find_xattr, &ctx); if (ret < 0) return ret; if (ctx.found_idx == -1) return -ENOENT; if (data) { *data = ctx.found_data; *data_len = ctx.found_data_len; } else { kfree(ctx.found_data); } return ctx.found_idx; } static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { int ret; struct send_ctx *sctx = ctx; char *found_data = NULL; int found_data_len = 0; ret = find_xattr(sctx->parent_root, sctx->right_path, sctx->cmp_key, name, name_len, &found_data, &found_data_len); if (ret == -ENOENT) { ret = __process_new_xattr(num, di_key, name, name_len, data, data_len, ctx); } else if (ret >= 0) { if (data_len != found_data_len || memcmp(data, found_data, data_len)) { ret = __process_new_xattr(num, di_key, name, name_len, data, data_len, ctx); } else { ret = 0; } } kfree(found_data); return ret; } static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, const char *name, int name_len, const char *data, int data_len, void *ctx) { int ret; struct send_ctx *sctx = ctx; ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, name, name_len, NULL, NULL); if (ret == -ENOENT) ret = __process_deleted_xattr(num, di_key, name, name_len, data, data_len, ctx); else if (ret >= 0) ret = 0; return ret; } static int process_changed_xattr(struct send_ctx *sctx) { int ret = 0; ret = iterate_dir_item(sctx->send_root, sctx->left_path, __process_changed_new_xattr, sctx); if (ret < 0) goto out; ret = iterate_dir_item(sctx->parent_root, sctx->right_path, __process_changed_deleted_xattr, sctx); out: return ret; } static int process_all_new_xattrs(struct send_ctx *sctx) { int ret = 0; int iter_ret = 0; struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; path = alloc_path_for_send(); if (!path) return -ENOMEM; root = sctx->send_root; key.objectid = sctx->cmp_key->objectid; key.type = BTRFS_XATTR_ITEM_KEY; key.offset = 0; btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { if (found_key.objectid != key.objectid || found_key.type != key.type) { ret = 0; break; } ret = iterate_dir_item(root, path, __process_new_xattr, sctx); if (ret < 0) break; } /* Catch error found during iteration */ if (iter_ret < 0) ret = iter_ret; btrfs_free_path(path); return ret; } static int send_verity(struct send_ctx *sctx, struct fs_path *path, struct fsverity_descriptor *desc) { int ret; ret = begin_cmd(sctx, BTRFS_SEND_C_ENABLE_VERITY); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); TLV_PUT_U8(sctx, BTRFS_SEND_A_VERITY_ALGORITHM, le8_to_cpu(desc->hash_algorithm)); TLV_PUT_U32(sctx, BTRFS_SEND_A_VERITY_BLOCK_SIZE, 1U << le8_to_cpu(desc->log_blocksize)); TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SALT_DATA, desc->salt, le8_to_cpu(desc->salt_size)); TLV_PUT(sctx, BTRFS_SEND_A_VERITY_SIG_DATA, desc->signature, le32_to_cpu(desc->sig_size)); ret = send_cmd(sctx); tlv_put_failure: out: return ret; } static int process_verity(struct send_ctx *sctx) { int ret = 0; struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; struct inode *inode; struct fs_path *p; inode = btrfs_iget(fs_info->sb, sctx->cur_ino, sctx->send_root); if (IS_ERR(inode)) return PTR_ERR(inode); ret = btrfs_get_verity_descriptor(inode, NULL, 0); if (ret < 0) goto iput; if (ret > FS_VERITY_MAX_DESCRIPTOR_SIZE) { ret = -EMSGSIZE; goto iput; } if (!sctx->verity_descriptor) { sctx->verity_descriptor = 
kvmalloc(FS_VERITY_MAX_DESCRIPTOR_SIZE, GFP_KERNEL); if (!sctx->verity_descriptor) { ret = -ENOMEM; goto iput; } } ret = btrfs_get_verity_descriptor(inode, sctx->verity_descriptor, ret); if (ret < 0) goto iput; p = fs_path_alloc(); if (!p) { ret = -ENOMEM; goto iput; } ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto free_path; ret = send_verity(sctx, p, sctx->verity_descriptor); if (ret < 0) goto free_path; free_path: fs_path_free(p); iput: iput(inode); return ret; } static inline u64 max_send_read_size(const struct send_ctx *sctx) { return sctx->send_max_size - SZ_16K; } static int put_data_header(struct send_ctx *sctx, u32 len) { if (WARN_ON_ONCE(sctx->put_data)) return -EINVAL; sctx->put_data = true; if (sctx->proto >= 2) { /* * Since v2, the data attribute header doesn't include a length, * it is implicitly to the end of the command. */ if (sctx->send_max_size - sctx->send_size < sizeof(__le16) + len) return -EOVERFLOW; put_unaligned_le16(BTRFS_SEND_A_DATA, sctx->send_buf + sctx->send_size); sctx->send_size += sizeof(__le16); } else { struct btrfs_tlv_header *hdr; if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len) return -EOVERFLOW; hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size); put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type); put_unaligned_le16(len, &hdr->tlv_len); sctx->send_size += sizeof(*hdr); } return 0; } static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len) { struct btrfs_root *root = sctx->send_root; struct btrfs_fs_info *fs_info = root->fs_info; struct page *page; pgoff_t index = offset >> PAGE_SHIFT; pgoff_t last_index; unsigned pg_offset = offset_in_page(offset); int ret; ret = put_data_header(sctx, len); if (ret) return ret; last_index = (offset + len - 1) >> PAGE_SHIFT; while (index <= last_index) { unsigned cur_len = min_t(unsigned, len, PAGE_SIZE - pg_offset); page = find_lock_page(sctx->cur_inode->i_mapping, index); if (!page) { page_cache_sync_readahead(sctx->cur_inode->i_mapping, &sctx->ra, NULL, index, last_index + 1 - index); page = find_or_create_page(sctx->cur_inode->i_mapping, index, GFP_KERNEL); if (!page) { ret = -ENOMEM; break; } } if (PageReadahead(page)) page_cache_async_readahead(sctx->cur_inode->i_mapping, &sctx->ra, NULL, page_folio(page), index, last_index + 1 - index); if (!PageUptodate(page)) { btrfs_read_folio(NULL, page_folio(page)); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); btrfs_err(fs_info, "send: IO error at offset %llu for inode %llu root %llu", page_offset(page), sctx->cur_ino, sctx->send_root->root_key.objectid); put_page(page); ret = -EIO; break; } } memcpy_from_page(sctx->send_buf + sctx->send_size, page, pg_offset, cur_len); unlock_page(page); put_page(page); index++; pg_offset = 0; len -= cur_len; sctx->send_size += cur_len; } return ret; } /* * Read some bytes from the current inode/file and send a write command to * user space. 
*/ static int send_write(struct send_ctx *sctx, u64 offset, u32 len) { struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; int ret = 0; struct fs_path *p; p = fs_path_alloc(); if (!p) return -ENOMEM; btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); if (ret < 0) goto out; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); ret = put_file_data(sctx, offset, len); if (ret < 0) goto out; ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } /* * Send a clone command to user space. */ static int send_clone(struct send_ctx *sctx, u64 offset, u32 len, struct clone_root *clone_root) { int ret = 0; struct fs_path *p; u64 gen; btrfs_debug(sctx->send_root->fs_info, "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu", offset, len, clone_root->root->root_key.objectid, clone_root->ino, clone_root->offset); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); if (ret < 0) goto out; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto out; TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); if (clone_root->root == sctx->send_root) { ret = get_inode_gen(sctx->send_root, clone_root->ino, &gen); if (ret < 0) goto out; ret = get_cur_path(sctx, clone_root->ino, gen, p); } else { ret = get_inode_path(clone_root->root, clone_root->ino, p); } if (ret < 0) goto out; /* * If the parent we're using has a received_uuid set then use that as * our clone source as that is what we will look for when doing a * receive. * * This covers the case that we create a snapshot off of a received * subvolume and then use that as the parent and try to receive on a * different host. */ if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid)) TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, clone_root->root->root_item.received_uuid); else TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, clone_root->root->root_item.uuid); TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, btrfs_root_ctransid(&clone_root->root->root_item)); TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, clone_root->offset); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } /* * Send an update extent command to user space. */ static int send_update_extent(struct send_ctx *sctx, u64 offset, u32 len) { int ret = 0; struct fs_path *p; p = fs_path_alloc(); if (!p) return -ENOMEM; ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); if (ret < 0) goto out; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(p); return ret; } static int send_hole(struct send_ctx *sctx, u64 end) { struct fs_path *p = NULL; u64 read_size = max_send_read_size(sctx); u64 offset = sctx->cur_inode_last_extent; int ret = 0; /* * A hole that starts at EOF or beyond it. Since we do not yet support * fallocate (for extent preallocation and hole punching), sending a * write of zeroes starting at EOF or beyond would later require issuing * a truncate operation which would undo the write and achieve nothing. 
*/ if (offset >= sctx->cur_inode_size) return 0; /* * Don't go beyond the inode's i_size due to prealloc extents that start * after the i_size. */ end = min_t(u64, end, sctx->cur_inode_size); if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) return send_update_extent(sctx, offset, end - offset); p = fs_path_alloc(); if (!p) return -ENOMEM; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); if (ret < 0) goto tlv_put_failure; while (offset < end) { u64 len = min(end - offset, read_size); ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); if (ret < 0) break; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); ret = put_data_header(sctx, len); if (ret < 0) break; memset(sctx->send_buf + sctx->send_size, 0, len); sctx->send_size += len; ret = send_cmd(sctx); if (ret < 0) break; offset += len; } sctx->cur_inode_next_write_offset = offset; tlv_put_failure: fs_path_free(p); return ret; } static int send_encoded_inline_extent(struct send_ctx *sctx, struct btrfs_path *path, u64 offset, u64 len) { struct btrfs_root *root = sctx->send_root; struct btrfs_fs_info *fs_info = root->fs_info; struct inode *inode; struct fs_path *fspath; struct extent_buffer *leaf = path->nodes[0]; struct btrfs_key key; struct btrfs_file_extent_item *ei; u64 ram_bytes; size_t inline_size; int ret; inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); if (IS_ERR(inode)) return PTR_ERR(inode); fspath = fs_path_alloc(); if (!fspath) { ret = -ENOMEM; goto out; } ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE); if (ret < 0) goto out; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); if (ret < 0) goto out; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); ram_bytes = btrfs_file_extent_ram_bytes(leaf, ei); inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN, min(key.offset + ram_bytes - offset, len)); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, ram_bytes); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset); ret = btrfs_encoded_io_compression_from_extent(fs_info, btrfs_file_extent_compression(leaf, ei)); if (ret < 0) goto out; TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret); ret = put_data_header(sctx, inline_size); if (ret < 0) goto out; read_extent_buffer(leaf, sctx->send_buf + sctx->send_size, btrfs_file_extent_inline_start(ei), inline_size); sctx->send_size += inline_size; ret = send_cmd(sctx); tlv_put_failure: out: fs_path_free(fspath); iput(inode); return ret; } static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path, u64 offset, u64 len) { struct btrfs_root *root = sctx->send_root; struct btrfs_fs_info *fs_info = root->fs_info; struct inode *inode; struct fs_path *fspath; struct extent_buffer *leaf = path->nodes[0]; struct btrfs_key key; struct btrfs_file_extent_item *ei; u64 disk_bytenr, disk_num_bytes; u32 data_offset; struct btrfs_cmd_header *hdr; u32 crc; int ret; inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root); if (IS_ERR(inode)) return PTR_ERR(inode); fspath = fs_path_alloc(); if (!fspath) { ret = -ENOMEM; goto out; } ret = begin_cmd(sctx, BTRFS_SEND_C_ENCODED_WRITE); if (ret < 0) goto out; ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); if (ret < 0) goto out; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); ei = btrfs_item_ptr(leaf, 
path->slots[0], struct btrfs_file_extent_item); disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei); disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, ei); TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, fspath); TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_FILE_LEN, min(key.offset + btrfs_file_extent_num_bytes(leaf, ei) - offset, len)); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_LEN, btrfs_file_extent_ram_bytes(leaf, ei)); TLV_PUT_U64(sctx, BTRFS_SEND_A_UNENCODED_OFFSET, offset - key.offset + btrfs_file_extent_offset(leaf, ei)); ret = btrfs_encoded_io_compression_from_extent(fs_info, btrfs_file_extent_compression(leaf, ei)); if (ret < 0) goto out; TLV_PUT_U32(sctx, BTRFS_SEND_A_COMPRESSION, ret); TLV_PUT_U32(sctx, BTRFS_SEND_A_ENCRYPTION, 0); ret = put_data_header(sctx, disk_num_bytes); if (ret < 0) goto out; /* * We want to do I/O directly into the send buffer, so get the next page * boundary in the send buffer. This means that there may be a gap * between the beginning of the command and the file data. */ data_offset = PAGE_ALIGN(sctx->send_size); if (data_offset > sctx->send_max_size || sctx->send_max_size - data_offset < disk_num_bytes) { ret = -EOVERFLOW; goto out; } /* * Note that send_buf is a mapping of send_buf_pages, so this is really * reading into send_buf. */ ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset, disk_bytenr, disk_num_bytes, sctx->send_buf_pages + (data_offset >> PAGE_SHIFT)); if (ret) goto out; hdr = (struct btrfs_cmd_header *)sctx->send_buf; hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr)); hdr->crc = 0; crc = crc32c(0, sctx->send_buf, sctx->send_size); crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes); hdr->crc = cpu_to_le32(crc); ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, &sctx->send_off); if (!ret) { ret = write_buf(sctx->send_filp, sctx->send_buf + data_offset, disk_num_bytes, &sctx->send_off); } sctx->send_size = 0; sctx->put_data = false; tlv_put_failure: out: fs_path_free(fspath); iput(inode); return ret; } static int send_extent_data(struct send_ctx *sctx, struct btrfs_path *path, const u64 offset, const u64 len) { const u64 end = offset + len; struct extent_buffer *leaf = path->nodes[0]; struct btrfs_file_extent_item *ei; u64 read_size = max_send_read_size(sctx); u64 sent = 0; if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) return send_update_extent(sctx, offset, len); ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); if ((sctx->flags & BTRFS_SEND_FLAG_COMPRESSED) && btrfs_file_extent_compression(leaf, ei) != BTRFS_COMPRESS_NONE) { bool is_inline = (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE); /* * Send the compressed extent unless the compressed data is * larger than the decompressed data. This can happen if we're * not sending the entire extent, either because it has been * partially overwritten/truncated or because this is a part of * the extent that we couldn't clone in clone_range(). 
*/ if (is_inline && btrfs_file_extent_inline_item_len(leaf, path->slots[0]) <= len) { return send_encoded_inline_extent(sctx, path, offset, len); } else if (!is_inline && btrfs_file_extent_disk_num_bytes(leaf, ei) <= len) { return send_encoded_extent(sctx, path, offset, len); } } if (sctx->cur_inode == NULL) { struct btrfs_root *root = sctx->send_root; sctx->cur_inode = btrfs_iget(root->fs_info->sb, sctx->cur_ino, root); if (IS_ERR(sctx->cur_inode)) { int err = PTR_ERR(sctx->cur_inode); sctx->cur_inode = NULL; return err; } memset(&sctx->ra, 0, sizeof(struct file_ra_state)); file_ra_state_init(&sctx->ra, sctx->cur_inode->i_mapping); /* * It's very likely there are no pages from this inode in the page * cache, so after reading extents and sending their data, we clean * the page cache to avoid trashing the page cache (adding pressure * to the page cache and forcing eviction of other data more useful * for applications). * * We decide if we should clean the page cache simply by checking * if the inode's mapping nrpages is 0 when we first open it, and * not by using something like filemap_range_has_page() before * reading an extent because when we ask the readahead code to * read a given file range, it may (and almost always does) read * pages from beyond that range (see the documentation for * page_cache_sync_readahead()), so it would not be reliable, * because after reading the first extent future calls to * filemap_range_has_page() would return true because the readahead * on the previous extent resulted in reading pages of the current * extent as well. */ sctx->clean_page_cache = (sctx->cur_inode->i_mapping->nrpages == 0); sctx->page_cache_clear_start = round_down(offset, PAGE_SIZE); } while (sent < len) { u64 size = min(len - sent, read_size); int ret; ret = send_write(sctx, offset + sent, size); if (ret < 0) return ret; sent += size; } if (sctx->clean_page_cache && PAGE_ALIGNED(end)) { /* * Always operate only on ranges that are a multiple of the page * size. This is not only to prevent zeroing parts of a page in * the case of subpage sector size, but also to guarantee we evict * pages, as passing a range that is smaller than page size does * not evict the respective page (only zeroes part of its content). * * Always start from the end offset of the last range cleared. * This is because the readahead code may (and very often does) * reads pages beyond the range we request for readahead. So if * we have an extent layout like this: * * [ extent A ] [ extent B ] [ extent C ] * * When we ask page_cache_sync_readahead() to read extent A, it * may also trigger reads for pages of extent B. If we are doing * an incremental send and extent B has not changed between the * parent and send snapshots, some or all of its pages may end * up being read and placed in the page cache. So when truncating * the page cache we always start from the end offset of the * previously processed extent up to the end of the current * extent. */ truncate_inode_pages_range(&sctx->cur_inode->i_data, sctx->page_cache_clear_start, end - 1); sctx->page_cache_clear_start = end; } return 0; } /* * Search for a capability xattr related to sctx->cur_ino. If the capability is * found, call send_set_xattr function to emit it. * * Return 0 if there isn't a capability, or when the capability was emitted * successfully, or < 0 if an error occurred. 
*/ static int send_capabilities(struct send_ctx *sctx) { struct fs_path *fspath = NULL; struct btrfs_path *path; struct btrfs_dir_item *di; struct extent_buffer *leaf; unsigned long data_ptr; char *buf = NULL; int buf_len; int ret = 0; path = alloc_path_for_send(); if (!path) return -ENOMEM; di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino, XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0); if (!di) { /* There is no xattr for this inode */ goto out; } else if (IS_ERR(di)) { ret = PTR_ERR(di); goto out; } leaf = path->nodes[0]; buf_len = btrfs_dir_data_len(leaf, di); fspath = fs_path_alloc(); buf = kmalloc(buf_len, GFP_KERNEL); if (!fspath || !buf) { ret = -ENOMEM; goto out; } ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath); if (ret < 0) goto out; data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di); read_extent_buffer(leaf, buf, data_ptr, buf_len); ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), buf, buf_len); out: kfree(buf); fs_path_free(fspath); btrfs_free_path(path); return ret; } static int clone_range(struct send_ctx *sctx, struct btrfs_path *dst_path, struct clone_root *clone_root, const u64 disk_byte, u64 data_offset, u64 offset, u64 len) { struct btrfs_path *path; struct btrfs_key key; int ret; struct btrfs_inode_info info; u64 clone_src_i_size = 0; /* * Prevent cloning from a zero offset with a length matching the sector * size because in some scenarios this will make the receiver fail. * * For example, if in the source filesystem the extent at offset 0 * has a length of sectorsize and it was written using direct IO, then * it can never be an inline extent (even if compression is enabled). * Then this extent can be cloned in the original filesystem to a non * zero file offset, but it may not be possible to clone in the * destination filesystem because it can be inlined due to compression * on the destination filesystem (as the receiver's write operations are * always done using buffered IO). The same happens when the original * filesystem does not have compression enabled but the destination * filesystem has. */ if (clone_root->offset == 0 && len == sctx->send_root->fs_info->sectorsize) return send_extent_data(sctx, dst_path, offset, len); path = alloc_path_for_send(); if (!path) return -ENOMEM; /* * There are inodes that have extents that lie behind its i_size. Don't * accept clones from these extents. */ ret = get_inode_info(clone_root->root, clone_root->ino, &info); btrfs_release_path(path); if (ret < 0) goto out; clone_src_i_size = info.size; /* * We can't send a clone operation for the entire range if we find * extent items in the respective range in the source file that * refer to different extents or if we find holes. * So check for that and do a mix of clone and regular write/copy * operations if needed. * * Example: * * mkfs.btrfs -f /dev/sda * mount /dev/sda /mnt * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo * cp --reflink=always /mnt/foo /mnt/bar * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo * btrfs subvolume snapshot -r /mnt /mnt/snap * * If when we send the snapshot and we are processing file bar (which * has a higher inode number than foo) we blindly send a clone operation * for the [0, 100K[ range from foo to bar, the receiver ends up getting * a file bar that matches the content of file foo - iow, doesn't match * the content from bar in the original filesystem. 
*/ key.objectid = clone_root->ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = clone_root->offset; ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0 && path->slots[0] > 0) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); if (key.objectid == clone_root->ino && key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } while (true) { struct extent_buffer *leaf = path->nodes[0]; int slot = path->slots[0]; struct btrfs_file_extent_item *ei; u8 type; u64 ext_len; u64 clone_len; u64 clone_data_offset; bool crossed_src_i_size = false; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(clone_root->root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); /* * We might have an implicit trailing hole (NO_HOLES feature * enabled). We deal with it after leaving this loop. */ if (key.objectid != clone_root->ino || key.type != BTRFS_EXTENT_DATA_KEY) break; ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); type = btrfs_file_extent_type(leaf, ei); if (type == BTRFS_FILE_EXTENT_INLINE) { ext_len = btrfs_file_extent_ram_bytes(leaf, ei); ext_len = PAGE_ALIGN(ext_len); } else { ext_len = btrfs_file_extent_num_bytes(leaf, ei); } if (key.offset + ext_len <= clone_root->offset) goto next; if (key.offset > clone_root->offset) { /* Implicit hole, NO_HOLES feature enabled. */ u64 hole_len = key.offset - clone_root->offset; if (hole_len > len) hole_len = len; ret = send_extent_data(sctx, dst_path, offset, hole_len); if (ret < 0) goto out; len -= hole_len; if (len == 0) break; offset += hole_len; clone_root->offset += hole_len; data_offset += hole_len; } if (key.offset >= clone_root->offset + len) break; if (key.offset >= clone_src_i_size) break; if (key.offset + ext_len > clone_src_i_size) { ext_len = clone_src_i_size - key.offset; crossed_src_i_size = true; } clone_data_offset = btrfs_file_extent_offset(leaf, ei); if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) { clone_root->offset = key.offset; if (clone_data_offset < data_offset && clone_data_offset + ext_len > data_offset) { u64 extent_offset; extent_offset = data_offset - clone_data_offset; ext_len -= extent_offset; clone_data_offset += extent_offset; clone_root->offset += extent_offset; } } clone_len = min_t(u64, ext_len, len); if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte && clone_data_offset == data_offset) { const u64 src_end = clone_root->offset + clone_len; const u64 sectorsize = SZ_64K; /* * We can't clone the last block, when its size is not * sector size aligned, into the middle of a file. If we * do so, the receiver will get a failure (-EINVAL) when * trying to clone or will silently corrupt the data in * the destination file if it's on a kernel without the * fix introduced by commit ac765f83f1397646 * ("Btrfs: fix data corruption due to cloning of eof * block). * * So issue a clone of the aligned down range plus a * regular write for the eof block, if we hit that case. * * Also, we use the maximum possible sector size, 64K, * because we don't know what's the sector size of the * filesystem that receives the stream, so we have to * assume the largest possible sector size. 
*/ if (src_end == clone_src_i_size && !IS_ALIGNED(src_end, sectorsize) && offset + clone_len < sctx->cur_inode_size) { u64 slen; slen = ALIGN_DOWN(src_end - clone_root->offset, sectorsize); if (slen > 0) { ret = send_clone(sctx, offset, slen, clone_root); if (ret < 0) goto out; } ret = send_extent_data(sctx, dst_path, offset + slen, clone_len - slen); } else { ret = send_clone(sctx, offset, clone_len, clone_root); } } else if (crossed_src_i_size && clone_len < len) { /* * If we are at i_size of the clone source inode and we * can not clone from it, terminate the loop. This is * to avoid sending two write operations, one with a * length matching clone_len and the final one after * this loop with a length of len - clone_len. * * When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED * was passed to the send ioctl), this helps avoid * sending an encoded write for an offset that is not * sector size aligned, in case the i_size of the source * inode is not sector size aligned. That will make the * receiver fallback to decompression of the data and * writing it using regular buffered IO, therefore while * not incorrect, it's not optimal due decompression and * possible re-compression at the receiver. */ break; } else { ret = send_extent_data(sctx, dst_path, offset, clone_len); } if (ret < 0) goto out; len -= clone_len; if (len == 0) break; offset += clone_len; clone_root->offset += clone_len; /* * If we are cloning from the file we are currently processing, * and using the send root as the clone root, we must stop once * the current clone offset reaches the current eof of the file * at the receiver, otherwise we would issue an invalid clone * operation (source range going beyond eof) and cause the * receiver to fail. So if we reach the current eof, bail out * and fallback to a regular write. 
*/ if (clone_root->root == sctx->send_root && clone_root->ino == sctx->cur_ino && clone_root->offset >= sctx->cur_inode_next_write_offset) break; data_offset += clone_len; next: path->slots[0]++; } if (len > 0) ret = send_extent_data(sctx, dst_path, offset, len); else ret = 0; out: btrfs_free_path(path); return ret; } static int send_write_or_clone(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key, struct clone_root *clone_root) { int ret = 0; u64 offset = key->offset; u64 end; u64 bs = sctx->send_root->fs_info->sb->s_blocksize; end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size); if (offset >= end) return 0; if (clone_root && IS_ALIGNED(end, bs)) { struct btrfs_file_extent_item *ei; u64 disk_byte; u64 data_offset; ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_file_extent_item); disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); data_offset = btrfs_file_extent_offset(path->nodes[0], ei); ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset, end - offset); } else { ret = send_extent_data(sctx, path, offset, end - offset); } sctx->cur_inode_next_write_offset = end; return ret; } static int is_extent_unchanged(struct send_ctx *sctx, struct btrfs_path *left_path, struct btrfs_key *ekey) { int ret = 0; struct btrfs_key key; struct btrfs_path *path = NULL; struct extent_buffer *eb; int slot; struct btrfs_key found_key; struct btrfs_file_extent_item *ei; u64 left_disknr; u64 right_disknr; u64 left_offset; u64 right_offset; u64 left_offset_fixed; u64 left_len; u64 right_len; u64 left_gen; u64 right_gen; u8 left_type; u8 right_type; path = alloc_path_for_send(); if (!path) return -ENOMEM; eb = left_path->nodes[0]; slot = left_path->slots[0]; ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); left_type = btrfs_file_extent_type(eb, ei); if (left_type != BTRFS_FILE_EXTENT_REG) { ret = 0; goto out; } left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); left_len = btrfs_file_extent_num_bytes(eb, ei); left_offset = btrfs_file_extent_offset(eb, ei); left_gen = btrfs_file_extent_generation(eb, ei); /* * Following comments will refer to these graphics. L is the left * extents which we are checking at the moment. 1-8 are the right * extents that we iterate. * * |-----L-----| * |-1-|-2a-|-3-|-4-|-5-|-6-| * * |-----L-----| * |--1--|-2b-|...(same as above) * * Alternative situation. Happens on files where extents got split. * |-----L-----| * |-----------7-----------|-6-| * * Alternative situation. Happens on files which got larger. * |-----L-----| * |-8-| * Nothing follows after 8. */ key.objectid = ekey->objectid; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = ekey->offset; ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); if (ret < 0) goto out; if (ret) { ret = 0; goto out; } /* * Handle special case where the right side has no extents at all. */ eb = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(eb, &found_key, slot); if (found_key.objectid != key.objectid || found_key.type != key.type) { /* If we're a hole then just pretend nothing changed */ ret = (left_disknr) ? 0 : 1; goto out; } /* * We're now on 2a, 2b or 7. 
*/ key = found_key; while (key.offset < ekey->offset + left_len) { ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); right_type = btrfs_file_extent_type(eb, ei); if (right_type != BTRFS_FILE_EXTENT_REG && right_type != BTRFS_FILE_EXTENT_INLINE) { ret = 0; goto out; } if (right_type == BTRFS_FILE_EXTENT_INLINE) { right_len = btrfs_file_extent_ram_bytes(eb, ei); right_len = PAGE_ALIGN(right_len); } else { right_len = btrfs_file_extent_num_bytes(eb, ei); } /* * Are we at extent 8? If yes, we know the extent is changed. * This may only happen on the first iteration. */ if (found_key.offset + right_len <= ekey->offset) { /* If we're a hole just pretend nothing changed */ ret = (left_disknr) ? 0 : 1; goto out; } /* * We just wanted to see if when we have an inline extent, what * follows it is a regular extent (wanted to check the above * condition for inline extents too). This should normally not * happen but it's possible for example when we have an inline * compressed extent representing data with a size matching * the page size (currently the same as sector size). */ if (right_type == BTRFS_FILE_EXTENT_INLINE) { ret = 0; goto out; } right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); right_offset = btrfs_file_extent_offset(eb, ei); right_gen = btrfs_file_extent_generation(eb, ei); left_offset_fixed = left_offset; if (key.offset < ekey->offset) { /* Fix the right offset for 2a and 7. */ right_offset += ekey->offset - key.offset; } else { /* Fix the left offset for all behind 2a and 2b */ left_offset_fixed += key.offset - ekey->offset; } /* * Check if we have the same extent. */ if (left_disknr != right_disknr || left_offset_fixed != right_offset || left_gen != right_gen) { ret = 0; goto out; } /* * Go to the next extent. */ ret = btrfs_next_item(sctx->parent_root, path); if (ret < 0) goto out; if (!ret) { eb = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(eb, &found_key, slot); } if (ret || found_key.objectid != key.objectid || found_key.type != key.type) { key.offset += right_len; break; } if (found_key.offset != key.offset + right_len) { ret = 0; goto out; } key = found_key; } /* * We're now behind the left extent (treat as unchanged) or at the end * of the right side (treat as changed). 
*/ if (key.offset >= ekey->offset + left_len) ret = 1; else ret = 0; out: btrfs_free_path(path); return ret; } static int get_last_extent(struct send_ctx *sctx, u64 offset) { struct btrfs_path *path; struct btrfs_root *root = sctx->send_root; struct btrfs_key key; int ret; path = alloc_path_for_send(); if (!path) return -ENOMEM; sctx->cur_inode_last_extent = 0; key.objectid = sctx->cur_ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = offset; ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); if (ret < 0) goto out; ret = 0; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) goto out; sctx->cur_inode_last_extent = btrfs_file_extent_end(path); out: btrfs_free_path(path); return ret; } static int range_is_hole_in_parent(struct send_ctx *sctx, const u64 start, const u64 end) { struct btrfs_path *path; struct btrfs_key key; struct btrfs_root *root = sctx->parent_root; u64 search_start = start; int ret; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = sctx->cur_ino; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = search_start; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret > 0 && path->slots[0] > 0) path->slots[0]--; while (search_start < end) { struct extent_buffer *leaf = path->nodes[0]; int slot = path->slots[0]; struct btrfs_file_extent_item *fi; u64 extent_end; if (slot >= btrfs_header_nritems(leaf)) { ret = btrfs_next_leaf(root, path); if (ret < 0) goto out; else if (ret > 0) break; continue; } btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid < sctx->cur_ino || key.type < BTRFS_EXTENT_DATA_KEY) goto next; if (key.objectid > sctx->cur_ino || key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) break; fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); extent_end = btrfs_file_extent_end(path); if (extent_end <= start) goto next; if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { search_start = extent_end; goto next; } ret = 0; goto out; next: path->slots[0]++; } ret = 1; out: btrfs_free_path(path); return ret; } static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { int ret = 0; if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) return 0; if (sctx->cur_inode_last_extent == (u64)-1) { ret = get_last_extent(sctx, key->offset - 1); if (ret) return ret; } if (path->slots[0] == 0 && sctx->cur_inode_last_extent < key->offset) { /* * We might have skipped entire leafs that contained only * file extent items for our current inode. These leafs have * a generation number smaller (older) than the one in the * current leaf and the leaf our last extent came from, and * are located between these 2 leafs. 
*/ ret = get_last_extent(sctx, key->offset - 1); if (ret) return ret; } if (sctx->cur_inode_last_extent < key->offset) { ret = range_is_hole_in_parent(sctx, sctx->cur_inode_last_extent, key->offset); if (ret < 0) return ret; else if (ret == 0) ret = send_hole(sctx, key->offset); else ret = 0; } sctx->cur_inode_last_extent = btrfs_file_extent_end(path); return ret; } static int process_extent(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { struct clone_root *found_clone = NULL; int ret = 0; if (S_ISLNK(sctx->cur_inode_mode)) return 0; if (sctx->parent_root && !sctx->cur_inode_new) { ret = is_extent_unchanged(sctx, path, key); if (ret < 0) goto out; if (ret) { ret = 0; goto out_hole; } } else { struct btrfs_file_extent_item *ei; u8 type; ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_file_extent_item); type = btrfs_file_extent_type(path->nodes[0], ei); if (type == BTRFS_FILE_EXTENT_PREALLOC || type == BTRFS_FILE_EXTENT_REG) { /* * The send spec does not have a prealloc command yet, * so just leave a hole for prealloc'ed extents until * we have enough commands queued up to justify rev'ing * the send spec. */ if (type == BTRFS_FILE_EXTENT_PREALLOC) { ret = 0; goto out; } /* Have a hole, just skip it. */ if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) { ret = 0; goto out; } } } ret = find_extent_clone(sctx, path, key->objectid, key->offset, sctx->cur_inode_size, &found_clone); if (ret != -ENOENT && ret < 0) goto out; ret = send_write_or_clone(sctx, path, key, found_clone); if (ret) goto out; out_hole: ret = maybe_send_hole(sctx, path, key); out: return ret; } static int process_all_extents(struct send_ctx *sctx) { int ret = 0; int iter_ret = 0; struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_key found_key; root = sctx->send_root; path = alloc_path_for_send(); if (!path) return -ENOMEM; key.objectid = sctx->cmp_key->objectid; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = 0; btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) { if (found_key.objectid != key.objectid || found_key.type != key.type) { ret = 0; break; } ret = process_extent(sctx, path, &found_key); if (ret < 0) break; } /* Catch error found during iteration */ if (iter_ret < 0) ret = iter_ret; btrfs_free_path(path); return ret; } static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end, int *pending_move, int *refs_processed) { int ret = 0; if (sctx->cur_ino == 0) goto out; if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) goto out; if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) goto out; ret = process_recorded_refs(sctx, pending_move); if (ret < 0) goto out; *refs_processed = 1; out: return ret; } static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) { int ret = 0; struct btrfs_inode_info info; u64 left_mode; u64 left_uid; u64 left_gid; u64 left_fileattr; u64 right_mode; u64 right_uid; u64 right_gid; u64 right_fileattr; int need_chmod = 0; int need_chown = 0; bool need_fileattr = false; int need_truncate = 1; int pending_move = 0; int refs_processed = 0; if (sctx->ignore_cur_inode) return 0; ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move, &refs_processed); if (ret < 0) goto out; /* * We have processed the refs and thus need to advance send_progress. * Now, calls to get_cur_xxx will take the updated refs of the current * inode into account. 
* * On the other hand, if our current inode is a directory and couldn't * be moved/renamed because its parent was renamed/moved too and it has * a higher inode number, we can only move/rename our current inode * after we moved/renamed its parent. Therefore in this case operate on * the old path (pre move/rename) of our current inode, and the * move/rename will be performed later. */ if (refs_processed && !pending_move) sctx->send_progress = sctx->cur_ino + 1; if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) goto out; if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) goto out; ret = get_inode_info(sctx->send_root, sctx->cur_ino, &info); if (ret < 0) goto out; left_mode = info.mode; left_uid = info.uid; left_gid = info.gid; left_fileattr = info.fileattr; if (!sctx->parent_root || sctx->cur_inode_new) { need_chown = 1; if (!S_ISLNK(sctx->cur_inode_mode)) need_chmod = 1; if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size) need_truncate = 0; } else { u64 old_size; ret = get_inode_info(sctx->parent_root, sctx->cur_ino, &info); if (ret < 0) goto out; old_size = info.size; right_mode = info.mode; right_uid = info.uid; right_gid = info.gid; right_fileattr = info.fileattr; if (left_uid != right_uid || left_gid != right_gid) need_chown = 1; if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) need_chmod = 1; if (!S_ISLNK(sctx->cur_inode_mode) && left_fileattr != right_fileattr) need_fileattr = true; if ((old_size == sctx->cur_inode_size) || (sctx->cur_inode_size > old_size && sctx->cur_inode_next_write_offset == sctx->cur_inode_size)) need_truncate = 0; } if (S_ISREG(sctx->cur_inode_mode)) { if (need_send_hole(sctx)) { if (sctx->cur_inode_last_extent == (u64)-1 || sctx->cur_inode_last_extent < sctx->cur_inode_size) { ret = get_last_extent(sctx, (u64)-1); if (ret) goto out; } if (sctx->cur_inode_last_extent < sctx->cur_inode_size) { ret = send_hole(sctx, sctx->cur_inode_size); if (ret) goto out; } } if (need_truncate) { ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen, sctx->cur_inode_size); if (ret < 0) goto out; } } if (need_chown) { ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, left_uid, left_gid); if (ret < 0) goto out; } if (need_chmod) { ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, left_mode); if (ret < 0) goto out; } if (need_fileattr) { ret = send_fileattr(sctx, sctx->cur_ino, sctx->cur_inode_gen, left_fileattr); if (ret < 0) goto out; } if (proto_cmd_ok(sctx, BTRFS_SEND_C_ENABLE_VERITY) && sctx->cur_inode_needs_verity) { ret = process_verity(sctx); if (ret < 0) goto out; } ret = send_capabilities(sctx); if (ret < 0) goto out; /* * If other directory inodes depended on our current directory * inode's move/rename, now do their move/rename operations. */ if (!is_waiting_for_move(sctx, sctx->cur_ino)) { ret = apply_children_dir_moves(sctx); if (ret) goto out; /* * Need to send that every time, no matter if it actually * changed between the two trees as we have done changes to * the inode before. If our inode is a directory and it's * waiting to be moved/renamed, we will send its utimes when * it's moved/renamed, therefore we don't need to do it here. */ sctx->send_progress = sctx->cur_ino + 1; /* * If the current inode is a non-empty directory, delay issuing * the utimes command for it, as it's very likely we have inodes * with an higher number inside it. We want to issue the utimes * command only after adding all dentries to it. 
*/ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_size > 0) ret = cache_dir_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); else ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); if (ret < 0) goto out; } out: if (!ret) ret = trim_dir_utimes_cache(sctx); return ret; } static void close_current_inode(struct send_ctx *sctx) { u64 i_size; if (sctx->cur_inode == NULL) return; i_size = i_size_read(sctx->cur_inode); /* * If we are doing an incremental send, we may have extents between the * last processed extent and the i_size that have not been processed * because they haven't changed but we may have read some of their pages * through readahead, see the comments at send_extent_data(). */ if (sctx->clean_page_cache && sctx->page_cache_clear_start < i_size) truncate_inode_pages_range(&sctx->cur_inode->i_data, sctx->page_cache_clear_start, round_up(i_size, PAGE_SIZE) - 1); iput(sctx->cur_inode); sctx->cur_inode = NULL; } static int changed_inode(struct send_ctx *sctx, enum btrfs_compare_tree_result result) { int ret = 0; struct btrfs_key *key = sctx->cmp_key; struct btrfs_inode_item *left_ii = NULL; struct btrfs_inode_item *right_ii = NULL; u64 left_gen = 0; u64 right_gen = 0; close_current_inode(sctx); sctx->cur_ino = key->objectid; sctx->cur_inode_new_gen = false; sctx->cur_inode_last_extent = (u64)-1; sctx->cur_inode_next_write_offset = 0; sctx->ignore_cur_inode = false; /* * Set send_progress to current inode. This will tell all get_cur_xxx * functions that the current inode's refs are not updated yet. Later, * when process_recorded_refs is finished, it is set to cur_ino + 1. */ sctx->send_progress = sctx->cur_ino; if (result == BTRFS_COMPARE_TREE_NEW || result == BTRFS_COMPARE_TREE_CHANGED) { left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], sctx->left_path->slots[0], struct btrfs_inode_item); left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], left_ii); } else { right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], sctx->right_path->slots[0], struct btrfs_inode_item); right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], right_ii); } if (result == BTRFS_COMPARE_TREE_CHANGED) { right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], sctx->right_path->slots[0], struct btrfs_inode_item); right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], right_ii); /* * The cur_ino = root dir case is special here. We can't treat * the inode as deleted+reused because it would generate a * stream that tries to delete/mkdir the root dir. */ if (left_gen != right_gen && sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) sctx->cur_inode_new_gen = true; } /* * Normally we do not find inodes with a link count of zero (orphans) * because the most common case is to create a snapshot and use it * for a send operation. However other less common use cases involve * using a subvolume and send it after turning it to RO mode just * after deleting all hard links of a file while holding an open * file descriptor against it or turning a RO snapshot into RW mode, * keep an open file descriptor against a file, delete it and then * turn the snapshot back to RO mode before using it for a send * operation. The former is what the receiver operation does. * Therefore, if we want to send these snapshots soon after they're * received, we need to handle orphan inodes as well. Moreover, orphans * can appear not only in the send snapshot but also in the parent * snapshot. 
Here are several cases: * * Case 1: BTRFS_COMPARE_TREE_NEW * | send snapshot | action * -------------------------------- * nlink | 0 | ignore * * Case 2: BTRFS_COMPARE_TREE_DELETED * | parent snapshot | action * ---------------------------------- * nlink | 0 | as usual * Note: No unlinks will be sent because there're no paths for it. * * Case 3: BTRFS_COMPARE_TREE_CHANGED * | | parent snapshot | send snapshot | action * ----------------------------------------------------------------------- * subcase 1 | nlink | 0 | 0 | ignore * subcase 2 | nlink | >0 | 0 | new_gen(deletion) * subcase 3 | nlink | 0 | >0 | new_gen(creation) * */ if (result == BTRFS_COMPARE_TREE_NEW) { if (btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii) == 0) { sctx->ignore_cur_inode = true; goto out; } sctx->cur_inode_gen = left_gen; sctx->cur_inode_new = true; sctx->cur_inode_deleted = false; sctx->cur_inode_size = btrfs_inode_size( sctx->left_path->nodes[0], left_ii); sctx->cur_inode_mode = btrfs_inode_mode( sctx->left_path->nodes[0], left_ii); sctx->cur_inode_rdev = btrfs_inode_rdev( sctx->left_path->nodes[0], left_ii); if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) ret = send_create_inode_if_needed(sctx); } else if (result == BTRFS_COMPARE_TREE_DELETED) { sctx->cur_inode_gen = right_gen; sctx->cur_inode_new = false; sctx->cur_inode_deleted = true; sctx->cur_inode_size = btrfs_inode_size( sctx->right_path->nodes[0], right_ii); sctx->cur_inode_mode = btrfs_inode_mode( sctx->right_path->nodes[0], right_ii); } else if (result == BTRFS_COMPARE_TREE_CHANGED) { u32 new_nlinks, old_nlinks; new_nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii); old_nlinks = btrfs_inode_nlink(sctx->right_path->nodes[0], right_ii); if (new_nlinks == 0 && old_nlinks == 0) { sctx->ignore_cur_inode = true; goto out; } else if (new_nlinks == 0 || old_nlinks == 0) { sctx->cur_inode_new_gen = 1; } /* * We need to do some special handling in case the inode was * reported as changed with a changed generation number. This * means that the original inode was deleted and new inode * reused the same inum. So we have to treat the old inode as * deleted and the new one as new. */ if (sctx->cur_inode_new_gen) { /* * First, process the inode as if it was deleted. */ if (old_nlinks > 0) { sctx->cur_inode_gen = right_gen; sctx->cur_inode_new = false; sctx->cur_inode_deleted = true; sctx->cur_inode_size = btrfs_inode_size( sctx->right_path->nodes[0], right_ii); sctx->cur_inode_mode = btrfs_inode_mode( sctx->right_path->nodes[0], right_ii); ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_DELETED); if (ret < 0) goto out; } /* * Now process the inode as if it was new. */ if (new_nlinks > 0) { sctx->cur_inode_gen = left_gen; sctx->cur_inode_new = true; sctx->cur_inode_deleted = false; sctx->cur_inode_size = btrfs_inode_size( sctx->left_path->nodes[0], left_ii); sctx->cur_inode_mode = btrfs_inode_mode( sctx->left_path->nodes[0], left_ii); sctx->cur_inode_rdev = btrfs_inode_rdev( sctx->left_path->nodes[0], left_ii); ret = send_create_inode_if_needed(sctx); if (ret < 0) goto out; ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); if (ret < 0) goto out; /* * Advance send_progress now as we did not get * into process_recorded_refs_if_needed in the * new_gen case. */ sctx->send_progress = sctx->cur_ino + 1; /* * Now process all extents and xattrs of the * inode as if they were all new. 
*/ ret = process_all_extents(sctx); if (ret < 0) goto out; ret = process_all_new_xattrs(sctx); if (ret < 0) goto out; } } else { sctx->cur_inode_gen = left_gen; sctx->cur_inode_new = false; sctx->cur_inode_new_gen = false; sctx->cur_inode_deleted = false; sctx->cur_inode_size = btrfs_inode_size( sctx->left_path->nodes[0], left_ii); sctx->cur_inode_mode = btrfs_inode_mode( sctx->left_path->nodes[0], left_ii); } } out: return ret; } /* * We have to process new refs before deleted refs, but compare_trees gives us * the new and deleted refs mixed. To fix this, we record the new/deleted refs * first and later process them in process_recorded_refs. * For the cur_inode_new_gen case, we skip recording completely because * changed_inode did already initiate processing of refs. The reason for this is * that in this case, compare_tree actually compares the refs of 2 different * inodes. To fix this, process_all_refs is used in changed_inode to handle all * refs of the right tree as deleted and all refs of the left tree as new. */ static int changed_ref(struct send_ctx *sctx, enum btrfs_compare_tree_result result) { int ret = 0; if (sctx->cur_ino != sctx->cmp_key->objectid) { inconsistent_snapshot_error(sctx, result, "reference"); return -EIO; } if (!sctx->cur_inode_new_gen && sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { if (result == BTRFS_COMPARE_TREE_NEW) ret = record_new_ref(sctx); else if (result == BTRFS_COMPARE_TREE_DELETED) ret = record_deleted_ref(sctx); else if (result == BTRFS_COMPARE_TREE_CHANGED) ret = record_changed_ref(sctx); } return ret; } /* * Process new/deleted/changed xattrs. We skip processing in the * cur_inode_new_gen case because changed_inode did already initiate processing * of xattrs. The reason is the same as in changed_ref */ static int changed_xattr(struct send_ctx *sctx, enum btrfs_compare_tree_result result) { int ret = 0; if (sctx->cur_ino != sctx->cmp_key->objectid) { inconsistent_snapshot_error(sctx, result, "xattr"); return -EIO; } if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result == BTRFS_COMPARE_TREE_NEW) ret = process_new_xattr(sctx); else if (result == BTRFS_COMPARE_TREE_DELETED) ret = process_deleted_xattr(sctx); else if (result == BTRFS_COMPARE_TREE_CHANGED) ret = process_changed_xattr(sctx); } return ret; } /* * Process new/deleted/changed extents. We skip processing in the * cur_inode_new_gen case because changed_inode did already initiate processing * of extents. The reason is the same as in changed_ref */ static int changed_extent(struct send_ctx *sctx, enum btrfs_compare_tree_result result) { int ret = 0; /* * We have found an extent item that changed without the inode item * having changed. This can happen either after relocation (where the * disk_bytenr of an extent item is replaced at * relocation.c:replace_file_extents()) or after deduplication into a * file in both the parent and send snapshots (where an extent item can * get modified or replaced with a new one). Note that deduplication * updates the inode item, but it only changes the iversion (sequence * field in the inode item) of the inode, so if a file is deduplicated * the same amount of times in both the parent and send snapshots, its * iversion becomes the same in both snapshots, whence the inode item is * the same on both snapshots. 
*/ if (sctx->cur_ino != sctx->cmp_key->objectid) return 0; if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result != BTRFS_COMPARE_TREE_DELETED) ret = process_extent(sctx, sctx->left_path, sctx->cmp_key); } return ret; } static int changed_verity(struct send_ctx *sctx, enum btrfs_compare_tree_result result) { int ret = 0; if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (result == BTRFS_COMPARE_TREE_NEW) sctx->cur_inode_needs_verity = true; } return ret; } static int dir_changed(struct send_ctx *sctx, u64 dir) { u64 orig_gen, new_gen; int ret; ret = get_inode_gen(sctx->send_root, dir, &new_gen); if (ret) return ret; ret = get_inode_gen(sctx->parent_root, dir, &orig_gen); if (ret) return ret; return (orig_gen != new_gen) ? 1 : 0; } static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { struct btrfs_inode_extref *extref; struct extent_buffer *leaf; u64 dirid = 0, last_dirid = 0; unsigned long ptr; u32 item_size; u32 cur_offset = 0; int ref_name_len; int ret = 0; /* Easy case, just check this one dirid */ if (key->type == BTRFS_INODE_REF_KEY) { dirid = key->offset; ret = dir_changed(sctx, dirid); goto out; } leaf = path->nodes[0]; item_size = btrfs_item_size(leaf, path->slots[0]); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); while (cur_offset < item_size) { extref = (struct btrfs_inode_extref *)(ptr + cur_offset); dirid = btrfs_inode_extref_parent(leaf, extref); ref_name_len = btrfs_inode_extref_name_len(leaf, extref); cur_offset += ref_name_len + sizeof(*extref); if (dirid == last_dirid) continue; ret = dir_changed(sctx, dirid); if (ret) break; last_dirid = dirid; } out: return ret; } /* * Updates compare related fields in sctx and simply forwards to the actual * changed_xxx functions. */ static int changed_cb(struct btrfs_path *left_path, struct btrfs_path *right_path, struct btrfs_key *key, enum btrfs_compare_tree_result result, struct send_ctx *sctx) { int ret = 0; /* * We can not hold the commit root semaphore here. This is because in * the case of sending and receiving to the same filesystem, using a * pipe, could result in a deadlock: * * 1) The task running send blocks on the pipe because it's full; * * 2) The task running receive, which is the only consumer of the pipe, * is waiting for a transaction commit (for example due to a space * reservation when doing a write or triggering a transaction commit * when creating a subvolume); * * 3) The transaction is waiting to write lock the commit root semaphore, * but can not acquire it since it's being held at 1). * * Down this call chain we write to the pipe through kernel_write(). * The same type of problem can also happen when sending to a file that * is stored in the same filesystem - when reserving space for a write * into the file, we can trigger a transaction commit. * * Our caller has supplied us with clones of leaves from the send and * parent roots, so we're safe here from a concurrent relocation and * further reallocation of metadata extents while we are here. Below we * also assert that the leaves are clones. */ lockdep_assert_not_held(&sctx->send_root->fs_info->commit_root_sem); /* * We always have a send root, so left_path is never NULL. We will not * have a leaf when we have reached the end of the send root but have * not yet reached the end of the parent root. */ if (left_path->nodes[0]) ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &left_path->nodes[0]->bflags)); /* * When doing a full send we don't have a parent root, so right_path is * NULL. 
When doing an incremental send, we may have reached the end of * the parent root already, so we don't have a leaf at right_path. */ if (right_path && right_path->nodes[0]) ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &right_path->nodes[0]->bflags)); if (result == BTRFS_COMPARE_TREE_SAME) { if (key->type == BTRFS_INODE_REF_KEY || key->type == BTRFS_INODE_EXTREF_KEY) { ret = compare_refs(sctx, left_path, key); if (!ret) return 0; if (ret < 0) return ret; } else if (key->type == BTRFS_EXTENT_DATA_KEY) { return maybe_send_hole(sctx, left_path, key); } else { return 0; } result = BTRFS_COMPARE_TREE_CHANGED; ret = 0; } sctx->left_path = left_path; sctx->right_path = right_path; sctx->cmp_key = key; ret = finish_inode_if_needed(sctx, 0); if (ret < 0) goto out; /* Ignore non-FS objects */ if (key->objectid == BTRFS_FREE_INO_OBJECTID || key->objectid == BTRFS_FREE_SPACE_OBJECTID) goto out; if (key->type == BTRFS_INODE_ITEM_KEY) { ret = changed_inode(sctx, result); } else if (!sctx->ignore_cur_inode) { if (key->type == BTRFS_INODE_REF_KEY || key->type == BTRFS_INODE_EXTREF_KEY) ret = changed_ref(sctx, result); else if (key->type == BTRFS_XATTR_ITEM_KEY) ret = changed_xattr(sctx, result); else if (key->type == BTRFS_EXTENT_DATA_KEY) ret = changed_extent(sctx, result); else if (key->type == BTRFS_VERITY_DESC_ITEM_KEY && key->offset == 0) ret = changed_verity(sctx, result); } out: return ret; } static int search_key_again(const struct send_ctx *sctx, struct btrfs_root *root, struct btrfs_path *path, const struct btrfs_key *key) { int ret; if (!path->need_commit_sem) lockdep_assert_held_read(&root->fs_info->commit_root_sem); /* * Roots used for send operations are readonly and no one can add, * update or remove keys from them, so we should be able to find our * key again. The only exception is deduplication, which can operate on * readonly roots and add, update or remove keys to/from them - but at * the moment we don't allow it to run in parallel with send. */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); ASSERT(ret <= 0); if (ret > 0) { btrfs_print_tree(path->nodes[path->lowest_level], false); btrfs_err(root->fs_info, "send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d", key->objectid, key->type, key->offset, (root == sctx->parent_root ? "parent" : "send"), root->root_key.objectid, path->lowest_level, path->slots[path->lowest_level]); return -EUCLEAN; } return ret; } static int full_send_tree(struct send_ctx *sctx) { int ret; struct btrfs_root *send_root = sctx->send_root; struct btrfs_key key; struct btrfs_fs_info *fs_info = send_root->fs_info; struct btrfs_path *path; path = alloc_path_for_send(); if (!path) return -ENOMEM; path->reada = READA_FORWARD_ALWAYS; key.objectid = BTRFS_FIRST_FREE_OBJECTID; key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; down_read(&fs_info->commit_root_sem); sctx->last_reloc_trans = fs_info->last_reloc_trans; up_read(&fs_info->commit_root_sem); ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); if (ret < 0) goto out; if (ret) goto out_finish; while (1) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); ret = changed_cb(path, NULL, &key, BTRFS_COMPARE_TREE_NEW, sctx); if (ret < 0) goto out; down_read(&fs_info->commit_root_sem); if (fs_info->last_reloc_trans > sctx->last_reloc_trans) { sctx->last_reloc_trans = fs_info->last_reloc_trans; up_read(&fs_info->commit_root_sem); /* * A transaction used for relocating a block group was * committed or is about to finish its commit. 
Release * our path (leaf) and restart the search, so that we * avoid operating on any file extent items that are * stale, with a disk_bytenr that reflects a pre * relocation value. This way we avoid as much as * possible to fallback to regular writes when checking * if we can clone file ranges. */ btrfs_release_path(path); ret = search_key_again(sctx, send_root, path, &key); if (ret < 0) goto out; } else { up_read(&fs_info->commit_root_sem); } ret = btrfs_next_item(send_root, path); if (ret < 0) goto out; if (ret) { ret = 0; break; } } out_finish: ret = finish_inode_if_needed(sctx, 1); out: btrfs_free_path(path); return ret; } static int replace_node_with_clone(struct btrfs_path *path, int level) { struct extent_buffer *clone; clone = btrfs_clone_extent_buffer(path->nodes[level]); if (!clone) return -ENOMEM; free_extent_buffer(path->nodes[level]); path->nodes[level] = clone; return 0; } static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen) { struct extent_buffer *eb; struct extent_buffer *parent = path->nodes[*level]; int slot = path->slots[*level]; const int nritems = btrfs_header_nritems(parent); u64 reada_max; u64 reada_done = 0; lockdep_assert_held_read(&parent->fs_info->commit_root_sem); BUG_ON(*level == 0); eb = btrfs_read_node_slot(parent, slot); if (IS_ERR(eb)) return PTR_ERR(eb); /* * Trigger readahead for the next leaves we will process, so that it is * very likely that when we need them they are already in memory and we * will not block on disk IO. For nodes we only do readahead for one, * since the time window between processing nodes is typically larger. */ reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize); for (slot++; slot < nritems && reada_done < reada_max; slot++) { if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) { btrfs_readahead_node_child(parent, slot); reada_done += eb->fs_info->nodesize; } } path->nodes[*level - 1] = eb; path->slots[*level - 1] = 0; (*level)--; if (*level == 0) return replace_node_with_clone(path, 0); return 0; } static int tree_move_next_or_upnext(struct btrfs_path *path, int *level, int root_level) { int ret = 0; int nritems; nritems = btrfs_header_nritems(path->nodes[*level]); path->slots[*level]++; while (path->slots[*level] >= nritems) { if (*level == root_level) { path->slots[*level] = nritems - 1; return -1; } /* move upnext */ path->slots[*level] = 0; free_extent_buffer(path->nodes[*level]); path->nodes[*level] = NULL; (*level)++; path->slots[*level]++; nritems = btrfs_header_nritems(path->nodes[*level]); ret = 1; } return ret; } /* * Returns 1 if it had to move up and next. 0 is returned if it moved only next * or down. */ static int tree_advance(struct btrfs_path *path, int *level, int root_level, int allow_down, struct btrfs_key *key, u64 reada_min_gen) { int ret; if (*level == 0 || !allow_down) { ret = tree_move_next_or_upnext(path, level, root_level); } else { ret = tree_move_down(path, level, reada_min_gen); } /* * Even if we have reached the end of a tree, ret is -1, update the key * anyway, so that in case we need to restart due to a block group * relocation, we can assert that the last key of the root node still * exists in the tree. 
*/ if (*level == 0) btrfs_item_key_to_cpu(path->nodes[*level], key, path->slots[*level]); else btrfs_node_key_to_cpu(path->nodes[*level], key, path->slots[*level]); return ret; } static int tree_compare_item(struct btrfs_path *left_path, struct btrfs_path *right_path, char *tmp_buf) { int cmp; int len1, len2; unsigned long off1, off2; len1 = btrfs_item_size(left_path->nodes[0], left_path->slots[0]); len2 = btrfs_item_size(right_path->nodes[0], right_path->slots[0]); if (len1 != len2) return 1; off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); off2 = btrfs_item_ptr_offset(right_path->nodes[0], right_path->slots[0]); read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1); cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1); if (cmp) return 1; return 0; } /* * A transaction used for relocating a block group was committed or is about to * finish its commit. Release our paths and restart the search, so that we are * not using stale extent buffers: * * 1) For levels > 0, we are only holding references of extent buffers, without * any locks on them, which does not prevent them from having been relocated * and reallocated after the last time we released the commit root semaphore. * The exception are the root nodes, for which we always have a clone, see * the comment at btrfs_compare_trees(); * * 2) For leaves, level 0, we are holding copies (clones) of extent buffers, so * we are safe from the concurrent relocation and reallocation. However they * can have file extent items with a pre relocation disk_bytenr value, so we * restart the start from the current commit roots and clone the new leaves so * that we get the post relocation disk_bytenr values. Not doing so, could * make us clone the wrong data in case there are new extents using the old * disk_bytenr that happen to be shared. */ static int restart_after_relocation(struct btrfs_path *left_path, struct btrfs_path *right_path, const struct btrfs_key *left_key, const struct btrfs_key *right_key, int left_level, int right_level, const struct send_ctx *sctx) { int root_level; int ret; lockdep_assert_held_read(&sctx->send_root->fs_info->commit_root_sem); btrfs_release_path(left_path); btrfs_release_path(right_path); /* * Since keys can not be added or removed to/from our roots because they * are readonly and we do not allow deduplication to run in parallel * (which can add, remove or change keys), the layout of the trees should * not change. */ left_path->lowest_level = left_level; ret = search_key_again(sctx, sctx->send_root, left_path, left_key); if (ret < 0) return ret; right_path->lowest_level = right_level; ret = search_key_again(sctx, sctx->parent_root, right_path, right_key); if (ret < 0) return ret; /* * If the lowest level nodes are leaves, clone them so that they can be * safely used by changed_cb() while not under the protection of the * commit root semaphore, even if relocation and reallocation happens in * parallel. */ if (left_level == 0) { ret = replace_node_with_clone(left_path, 0); if (ret < 0) return ret; } if (right_level == 0) { ret = replace_node_with_clone(right_path, 0); if (ret < 0) return ret; } /* * Now clone the root nodes (unless they happen to be the leaves we have * already cloned). This is to protect against concurrent snapshotting of * the send and parent roots (see the comment at btrfs_compare_trees()). 
*/ root_level = btrfs_header_level(sctx->send_root->commit_root); if (root_level > 0) { ret = replace_node_with_clone(left_path, root_level); if (ret < 0) return ret; } root_level = btrfs_header_level(sctx->parent_root->commit_root); if (root_level > 0) { ret = replace_node_with_clone(right_path, root_level); if (ret < 0) return ret; } return 0; } /* * This function compares two trees and calls the provided callback for * every changed/new/deleted item it finds. * If shared tree blocks are encountered, whole subtrees are skipped, making * the compare pretty fast on snapshotted subvolumes. * * This currently works on commit roots only. As commit roots are read only, * we don't do any locking. The commit roots are protected with transactions. * Transactions are ended and rejoined when a commit is tried in between. * * This function checks for modifications done to the trees while comparing. * If it detects a change, it aborts immediately. */ static int btrfs_compare_trees(struct btrfs_root *left_root, struct btrfs_root *right_root, struct send_ctx *sctx) { struct btrfs_fs_info *fs_info = left_root->fs_info; int ret; int cmp; struct btrfs_path *left_path = NULL; struct btrfs_path *right_path = NULL; struct btrfs_key left_key; struct btrfs_key right_key; char *tmp_buf = NULL; int left_root_level; int right_root_level; int left_level; int right_level; int left_end_reached = 0; int right_end_reached = 0; int advance_left = 0; int advance_right = 0; u64 left_blockptr; u64 right_blockptr; u64 left_gen; u64 right_gen; u64 reada_min_gen; left_path = btrfs_alloc_path(); if (!left_path) { ret = -ENOMEM; goto out; } right_path = btrfs_alloc_path(); if (!right_path) { ret = -ENOMEM; goto out; } tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); if (!tmp_buf) { ret = -ENOMEM; goto out; } left_path->search_commit_root = 1; left_path->skip_locking = 1; right_path->search_commit_root = 1; right_path->skip_locking = 1; /* * Strategy: Go to the first items of both trees. Then do * * If both trees are at level 0 * Compare keys of current items * If left < right treat left item as new, advance left tree * and repeat * If left > right treat right item as deleted, advance right tree * and repeat * If left == right do deep compare of items, treat as changed if * needed, advance both trees and repeat * If both trees are at the same level but not at level 0 * Compare keys of current nodes/leafs * If left < right advance left tree and repeat * If left > right advance right tree and repeat * If left == right compare blockptrs of the next nodes/leafs * If they match advance both trees but stay at the same level * and repeat * If they don't match advance both trees while allowing to go * deeper and repeat * If tree levels are different * Advance the tree that needs it and repeat * * Advancing a tree means: * If we are at level 0, try to go to the next slot. If that's not * possible, go one level up and repeat. Stop when we found a level * where we could go to the next slot. We may at this point be on a * node or a leaf. * * If we are not at level 0 and not on shared tree blocks, go one * level deeper. * * If we are not at level 0 and on shared tree blocks, go one slot to * the right if possible or go up and right. */ down_read(&fs_info->commit_root_sem); left_level = btrfs_header_level(left_root->commit_root); left_root_level = left_level; /* * We clone the root node of the send and parent roots to prevent races * with snapshot creation of these roots. 
Snapshot creation COWs the * root node of a tree, so after the transaction is committed the old * extent can be reallocated while this send operation is still ongoing. * So we clone them, under the commit root semaphore, to be race free. */ left_path->nodes[left_level] = btrfs_clone_extent_buffer(left_root->commit_root); if (!left_path->nodes[left_level]) { ret = -ENOMEM; goto out_unlock; } right_level = btrfs_header_level(right_root->commit_root); right_root_level = right_level; right_path->nodes[right_level] = btrfs_clone_extent_buffer(right_root->commit_root); if (!right_path->nodes[right_level]) { ret = -ENOMEM; goto out_unlock; } /* * Our right root is the parent root, while the left root is the "send" * root. We know that all new nodes/leaves in the left root must have * a generation greater than the right root's generation, so we trigger * readahead for those nodes and leaves of the left root, as we know we * will need to read them at some point. */ reada_min_gen = btrfs_header_generation(right_root->commit_root); if (left_level == 0) btrfs_item_key_to_cpu(left_path->nodes[left_level], &left_key, left_path->slots[left_level]); else btrfs_node_key_to_cpu(left_path->nodes[left_level], &left_key, left_path->slots[left_level]); if (right_level == 0) btrfs_item_key_to_cpu(right_path->nodes[right_level], &right_key, right_path->slots[right_level]); else btrfs_node_key_to_cpu(right_path->nodes[right_level], &right_key, right_path->slots[right_level]); sctx->last_reloc_trans = fs_info->last_reloc_trans; while (1) { if (need_resched() || rwsem_is_contended(&fs_info->commit_root_sem)) { up_read(&fs_info->commit_root_sem); cond_resched(); down_read(&fs_info->commit_root_sem); } if (fs_info->last_reloc_trans > sctx->last_reloc_trans) { ret = restart_after_relocation(left_path, right_path, &left_key, &right_key, left_level, right_level, sctx); if (ret < 0) goto out_unlock; sctx->last_reloc_trans = fs_info->last_reloc_trans; } if (advance_left && !left_end_reached) { ret = tree_advance(left_path, &left_level, left_root_level, advance_left != ADVANCE_ONLY_NEXT, &left_key, reada_min_gen); if (ret == -1) left_end_reached = ADVANCE; else if (ret < 0) goto out_unlock; advance_left = 0; } if (advance_right && !right_end_reached) { ret = tree_advance(right_path, &right_level, right_root_level, advance_right != ADVANCE_ONLY_NEXT, &right_key, reada_min_gen); if (ret == -1) right_end_reached = ADVANCE; else if (ret < 0) goto out_unlock; advance_right = 0; } if (left_end_reached && right_end_reached) { ret = 0; goto out_unlock; } else if (left_end_reached) { if (right_level == 0) { up_read(&fs_info->commit_root_sem); ret = changed_cb(left_path, right_path, &right_key, BTRFS_COMPARE_TREE_DELETED, sctx); if (ret < 0) goto out; down_read(&fs_info->commit_root_sem); } advance_right = ADVANCE; continue; } else if (right_end_reached) { if (left_level == 0) { up_read(&fs_info->commit_root_sem); ret = changed_cb(left_path, right_path, &left_key, BTRFS_COMPARE_TREE_NEW, sctx); if (ret < 0) goto out; down_read(&fs_info->commit_root_sem); } advance_left = ADVANCE; continue; } if (left_level == 0 && right_level == 0) { up_read(&fs_info->commit_root_sem); cmp = btrfs_comp_cpu_keys(&left_key, &right_key); if (cmp < 0) { ret = changed_cb(left_path, right_path, &left_key, BTRFS_COMPARE_TREE_NEW, sctx); advance_left = ADVANCE; } else if (cmp > 0) { ret = changed_cb(left_path, right_path, &right_key, BTRFS_COMPARE_TREE_DELETED, sctx); advance_right = ADVANCE; } else { enum btrfs_compare_tree_result result; 
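				/*
				 * Keys match: the two items are compared byte
				 * by byte via tree_compare_item() to classify
				 * the pair as SAME or CHANGED before handing
				 * it to changed_cb().
				 */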
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0])); ret = tree_compare_item(left_path, right_path, tmp_buf); if (ret) result = BTRFS_COMPARE_TREE_CHANGED; else result = BTRFS_COMPARE_TREE_SAME; ret = changed_cb(left_path, right_path, &left_key, result, sctx); advance_left = ADVANCE; advance_right = ADVANCE; } if (ret < 0) goto out; down_read(&fs_info->commit_root_sem); } else if (left_level == right_level) { cmp = btrfs_comp_cpu_keys(&left_key, &right_key); if (cmp < 0) { advance_left = ADVANCE; } else if (cmp > 0) { advance_right = ADVANCE; } else { left_blockptr = btrfs_node_blockptr( left_path->nodes[left_level], left_path->slots[left_level]); right_blockptr = btrfs_node_blockptr( right_path->nodes[right_level], right_path->slots[right_level]); left_gen = btrfs_node_ptr_generation( left_path->nodes[left_level], left_path->slots[left_level]); right_gen = btrfs_node_ptr_generation( right_path->nodes[right_level], right_path->slots[right_level]); if (left_blockptr == right_blockptr && left_gen == right_gen) { /* * As we're on a shared block, don't * allow to go deeper. */ advance_left = ADVANCE_ONLY_NEXT; advance_right = ADVANCE_ONLY_NEXT; } else { advance_left = ADVANCE; advance_right = ADVANCE; } } } else if (left_level < right_level) { advance_right = ADVANCE; } else { advance_left = ADVANCE; } } out_unlock: up_read(&fs_info->commit_root_sem); out: btrfs_free_path(left_path); btrfs_free_path(right_path); kvfree(tmp_buf); return ret; } static int send_subvol(struct send_ctx *sctx) { int ret; if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { ret = send_header(sctx); if (ret < 0) goto out; } ret = send_subvol_begin(sctx); if (ret < 0) goto out; if (sctx->parent_root) { ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx); if (ret < 0) goto out; ret = finish_inode_if_needed(sctx, 1); if (ret < 0) goto out; } else { ret = full_send_tree(sctx); if (ret < 0) goto out; } out: free_recorded_refs(sctx); return ret; } /* * If orphan cleanup did remove any orphans from a root, it means the tree * was modified and therefore the commit root is not the same as the current * root anymore. This is a problem, because send uses the commit root and * therefore can see inode items that don't exist in the current root anymore, * and for example make calls to btrfs_iget, which will do tree lookups based * on the current root and not on the commit root. Those lookups will fail, * returning a -ESTALE error, and making send fail with that error. So make * sure a send does not see any orphans we have just removed, and that it will * see the same inodes regardless of whether a transaction commit happened * before it started (meaning that the commit root will be the same as the * current root) or not. */ static int ensure_commit_roots_uptodate(struct send_ctx *sctx) { int i; struct btrfs_trans_handle *trans = NULL; again: if (sctx->parent_root && sctx->parent_root->node != sctx->parent_root->commit_root) goto commit_trans; for (i = 0; i < sctx->clone_roots_cnt; i++) if (sctx->clone_roots[i].root->node != sctx->clone_roots[i].root->commit_root) goto commit_trans; if (trans) return btrfs_end_transaction(trans); return 0; commit_trans: /* Use any root, all fs roots will get their commit roots updated. 
*/ if (!trans) { trans = btrfs_join_transaction(sctx->send_root); if (IS_ERR(trans)) return PTR_ERR(trans); goto again; } return btrfs_commit_transaction(trans); } /* * Make sure any existing dellaloc is flushed for any root used by a send * operation so that we do not miss any data and we do not race with writeback * finishing and changing a tree while send is using the tree. This could * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and * a send operation then uses the subvolume. * After flushing delalloc ensure_commit_roots_uptodate() must be called. */ static int flush_delalloc_roots(struct send_ctx *sctx) { struct btrfs_root *root = sctx->parent_root; int ret; int i; if (root) { ret = btrfs_start_delalloc_snapshot(root, false); if (ret) return ret; btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); } for (i = 0; i < sctx->clone_roots_cnt; i++) { root = sctx->clone_roots[i].root; ret = btrfs_start_delalloc_snapshot(root, false); if (ret) return ret; btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX); } return 0; } static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) { spin_lock(&root->root_item_lock); root->send_in_progress--; /* * Not much left to do, we don't know why it's unbalanced and * can't blindly reset it to 0. */ if (root->send_in_progress < 0) btrfs_err(root->fs_info, "send_in_progress unbalanced %d root %llu", root->send_in_progress, root->root_key.objectid); spin_unlock(&root->root_item_lock); } static void dedupe_in_progress_warn(const struct btrfs_root *root) { btrfs_warn_rl(root->fs_info, "cannot use root %llu for send while deduplications on it are in progress (%d in progress)", root->root_key.objectid, root->dedupe_in_progress); } long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) { int ret = 0; struct btrfs_root *send_root = BTRFS_I(inode)->root; struct btrfs_fs_info *fs_info = send_root->fs_info; struct btrfs_root *clone_root; struct send_ctx *sctx = NULL; u32 i; u64 *clone_sources_tmp = NULL; int clone_sources_to_rollback = 0; size_t alloc_size; int sort_clone_roots = 0; struct btrfs_lru_cache_entry *entry; struct btrfs_lru_cache_entry *tmp; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* * The subvolume must remain read-only during send, protect against * making it RW. This also protects against deletion. */ spin_lock(&send_root->root_item_lock); if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) { dedupe_in_progress_warn(send_root); spin_unlock(&send_root->root_item_lock); return -EAGAIN; } send_root->send_in_progress++; spin_unlock(&send_root->root_item_lock); /* * Userspace tools do the checks and warn the user if it's * not RO. */ if (!btrfs_root_readonly(send_root)) { ret = -EPERM; goto out; } /* * Check that we don't overflow at later allocations, we request * clone_sources_count + 1 items, and compare to unsigned long inside * access_ok. Also set an upper limit for allocation size so this can't * easily exhaust memory. Max number of clone sources is about 200K. 
*/ if (arg->clone_sources_count > SZ_8M / sizeof(struct clone_root)) { ret = -EINVAL; goto out; } if (arg->flags & ~BTRFS_SEND_FLAG_MASK) { ret = -EOPNOTSUPP; goto out; } sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL); if (!sctx) { ret = -ENOMEM; goto out; } INIT_LIST_HEAD(&sctx->new_refs); INIT_LIST_HEAD(&sctx->deleted_refs); btrfs_lru_cache_init(&sctx->name_cache, SEND_MAX_NAME_CACHE_SIZE); btrfs_lru_cache_init(&sctx->backref_cache, SEND_MAX_BACKREF_CACHE_SIZE); btrfs_lru_cache_init(&sctx->dir_created_cache, SEND_MAX_DIR_CREATED_CACHE_SIZE); /* * This cache is periodically trimmed to a fixed size elsewhere, see * cache_dir_utimes() and trim_dir_utimes_cache(). */ btrfs_lru_cache_init(&sctx->dir_utimes_cache, 0); sctx->pending_dir_moves = RB_ROOT; sctx->waiting_dir_moves = RB_ROOT; sctx->orphan_dirs = RB_ROOT; sctx->rbtree_new_refs = RB_ROOT; sctx->rbtree_deleted_refs = RB_ROOT; sctx->flags = arg->flags; if (arg->flags & BTRFS_SEND_FLAG_VERSION) { if (arg->version > BTRFS_SEND_STREAM_VERSION) { ret = -EPROTO; goto out; } /* Zero means "use the highest version" */ sctx->proto = arg->version ?: BTRFS_SEND_STREAM_VERSION; } else { sctx->proto = 1; } if ((arg->flags & BTRFS_SEND_FLAG_COMPRESSED) && sctx->proto < 2) { ret = -EINVAL; goto out; } sctx->send_filp = fget(arg->send_fd); if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) { ret = -EBADF; goto out; } sctx->send_root = send_root; /* * Unlikely but possible, if the subvolume is marked for deletion but * is slow to remove the directory entry, send can still be started */ if (btrfs_root_dead(sctx->send_root)) { ret = -EPERM; goto out; } sctx->clone_roots_cnt = arg->clone_sources_count; if (sctx->proto >= 2) { u32 send_buf_num_pages; sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V2; sctx->send_buf = vmalloc(sctx->send_max_size); if (!sctx->send_buf) { ret = -ENOMEM; goto out; } send_buf_num_pages = sctx->send_max_size >> PAGE_SHIFT; sctx->send_buf_pages = kcalloc(send_buf_num_pages, sizeof(*sctx->send_buf_pages), GFP_KERNEL); if (!sctx->send_buf_pages) { ret = -ENOMEM; goto out; } for (i = 0; i < send_buf_num_pages; i++) { sctx->send_buf_pages[i] = vmalloc_to_page(sctx->send_buf + (i << PAGE_SHIFT)); } } else { sctx->send_max_size = BTRFS_SEND_BUF_SIZE_V1; sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL); } if (!sctx->send_buf) { ret = -ENOMEM; goto out; } sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1, sizeof(*sctx->clone_roots), GFP_KERNEL); if (!sctx->clone_roots) { ret = -ENOMEM; goto out; } alloc_size = array_size(sizeof(*arg->clone_sources), arg->clone_sources_count); if (arg->clone_sources_count) { clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL); if (!clone_sources_tmp) { ret = -ENOMEM; goto out; } ret = copy_from_user(clone_sources_tmp, arg->clone_sources, alloc_size); if (ret) { ret = -EFAULT; goto out; } for (i = 0; i < arg->clone_sources_count; i++) { clone_root = btrfs_get_fs_root(fs_info, clone_sources_tmp[i], true); if (IS_ERR(clone_root)) { ret = PTR_ERR(clone_root); goto out; } spin_lock(&clone_root->root_item_lock); if (!btrfs_root_readonly(clone_root) || btrfs_root_dead(clone_root)) { spin_unlock(&clone_root->root_item_lock); btrfs_put_root(clone_root); ret = -EPERM; goto out; } if (clone_root->dedupe_in_progress) { dedupe_in_progress_warn(clone_root); spin_unlock(&clone_root->root_item_lock); btrfs_put_root(clone_root); ret = -EAGAIN; goto out; } clone_root->send_in_progress++; spin_unlock(&clone_root->root_item_lock); sctx->clone_roots[i].root = clone_root; 
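			/*
			 * Track how many clone roots have been grabbed so far,
			 * so the error path can drop their send_in_progress
			 * counts and root references if a later clone source
			 * fails validation.
			 */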
clone_sources_to_rollback = i + 1; } kvfree(clone_sources_tmp); clone_sources_tmp = NULL; } if (arg->parent_root) { sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root, true); if (IS_ERR(sctx->parent_root)) { ret = PTR_ERR(sctx->parent_root); goto out; } spin_lock(&sctx->parent_root->root_item_lock); sctx->parent_root->send_in_progress++; if (!btrfs_root_readonly(sctx->parent_root) || btrfs_root_dead(sctx->parent_root)) { spin_unlock(&sctx->parent_root->root_item_lock); ret = -EPERM; goto out; } if (sctx->parent_root->dedupe_in_progress) { dedupe_in_progress_warn(sctx->parent_root); spin_unlock(&sctx->parent_root->root_item_lock); ret = -EAGAIN; goto out; } spin_unlock(&sctx->parent_root->root_item_lock); } /* * Clones from send_root are allowed, but only if the clone source * is behind the current send position. This is checked while searching * for possible clone sources. */ sctx->clone_roots[sctx->clone_roots_cnt++].root = btrfs_grab_root(sctx->send_root); /* We do a bsearch later */ sort(sctx->clone_roots, sctx->clone_roots_cnt, sizeof(*sctx->clone_roots), __clone_root_cmp_sort, NULL); sort_clone_roots = 1; ret = flush_delalloc_roots(sctx); if (ret) goto out; ret = ensure_commit_roots_uptodate(sctx); if (ret) goto out; ret = send_subvol(sctx); if (ret < 0) goto out; btrfs_lru_cache_for_each_entry_safe(&sctx->dir_utimes_cache, entry, tmp) { ret = send_utimes(sctx, entry->key, entry->gen); if (ret < 0) goto out; btrfs_lru_cache_remove(&sctx->dir_utimes_cache, entry); } if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) { ret = begin_cmd(sctx, BTRFS_SEND_C_END); if (ret < 0) goto out; ret = send_cmd(sctx); if (ret < 0) goto out; } out: WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)); while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) { struct rb_node *n; struct pending_dir_move *pm; n = rb_first(&sctx->pending_dir_moves); pm = rb_entry(n, struct pending_dir_move, node); while (!list_empty(&pm->list)) { struct pending_dir_move *pm2; pm2 = list_first_entry(&pm->list, struct pending_dir_move, list); free_pending_move(sctx, pm2); } free_pending_move(sctx, pm); } WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)); while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) { struct rb_node *n; struct waiting_dir_move *dm; n = rb_first(&sctx->waiting_dir_moves); dm = rb_entry(n, struct waiting_dir_move, node); rb_erase(&dm->node, &sctx->waiting_dir_moves); kfree(dm); } WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs)); while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) { struct rb_node *n; struct orphan_dir_info *odi; n = rb_first(&sctx->orphan_dirs); odi = rb_entry(n, struct orphan_dir_info, node); free_orphan_dir_info(sctx, odi); } if (sort_clone_roots) { for (i = 0; i < sctx->clone_roots_cnt; i++) { btrfs_root_dec_send_in_progress( sctx->clone_roots[i].root); btrfs_put_root(sctx->clone_roots[i].root); } } else { for (i = 0; sctx && i < clone_sources_to_rollback; i++) { btrfs_root_dec_send_in_progress( sctx->clone_roots[i].root); btrfs_put_root(sctx->clone_roots[i].root); } btrfs_root_dec_send_in_progress(send_root); } if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) { btrfs_root_dec_send_in_progress(sctx->parent_root); btrfs_put_root(sctx->parent_root); } kvfree(clone_sources_tmp); if (sctx) { if (sctx->send_filp) fput(sctx->send_filp); kvfree(sctx->clone_roots); kfree(sctx->send_buf_pages); kvfree(sctx->send_buf); kvfree(sctx->verity_descriptor); close_current_inode(sctx); btrfs_lru_cache_clear(&sctx->name_cache); 
btrfs_lru_cache_clear(&sctx->backref_cache);
		btrfs_lru_cache_clear(&sctx->dir_created_cache);
		btrfs_lru_cache_clear(&sctx->dir_utimes_cache);
		kfree(sctx);
	}

	return ret;
}
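/*
 * A minimal user-space sketch of driving btrfs_ioctl_send() above through the
 * BTRFS_IOC_SEND ioctl, assuming the uapi definitions from <linux/btrfs.h>.
 * The helper name and snapshot path are illustrative only. Leaving parent_root
 * and clone_sources_count at zero requests a full send; stream_fd must be open
 * for writing (the kernel rejects it with -EBADF otherwise) and the caller
 * needs CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

static int send_full_snapshot(const char *snap_path, int stream_fd)
{
	struct btrfs_ioctl_send_args args;
	int snap_fd;
	int ret;

	/* The ioctl is issued against an fd of the read-only snapshot. */
	snap_fd = open(snap_path, O_RDONLY);
	if (snap_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.send_fd = stream_fd;	/* send stream is written to this fd */
	args.parent_root = 0;		/* 0: full send, no parent snapshot */
	args.clone_sources = NULL;
	args.clone_sources_count = 0;
	args.flags = 0;			/* e.g. BTRFS_SEND_FLAG_VERSION for stream v2 */

	ret = ioctl(snap_fd, BTRFS_IOC_SEND, &args);
	close(snap_fd);
	return ret;
}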
/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
*/ #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/hw_random.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/uaccess.h> #define RNG_MODULE_NAME "hw_random" #define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES) static struct hwrng *current_rng; /* the current rng has been explicitly chosen by user via sysfs */ static int cur_rng_set_by_user; static struct task_struct *hwrng_fill; /* list of registered rngs */ static LIST_HEAD(rng_list); /* Protects rng_list and current_rng */ static DEFINE_MUTEX(rng_mutex); /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ static DEFINE_MUTEX(reading_mutex); static int data_avail; static u8 *rng_buffer, *rng_fillbuf; static unsigned short current_quality; static unsigned short default_quality = 1024; /* default to maximum */ module_param(current_quality, ushort, 0644); MODULE_PARM_DESC(current_quality, "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead"); module_param(default_quality, ushort, 0644); MODULE_PARM_DESC(default_quality, "default maximum entropy content of hwrng per 1024 bits of input"); static void drop_current_rng(void); static int hwrng_init(struct hwrng *rng); static int hwrng_fillfn(void *unused); static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int wait); static size_t rng_buffer_size(void) { return RNG_BUFFER_SIZE; } static void add_early_randomness(struct hwrng *rng) { int bytes_read; mutex_lock(&reading_mutex); bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0); mutex_unlock(&reading_mutex); if (bytes_read > 0) { size_t entropy = bytes_read * 8 * rng->quality / 1024; add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false); } } static inline void cleanup_rng(struct kref *kref) { struct hwrng *rng = container_of(kref, struct hwrng, ref); if (rng->cleanup) rng->cleanup(rng); complete(&rng->cleanup_done); } static int set_current_rng(struct hwrng *rng) { int err; BUG_ON(!mutex_is_locked(&rng_mutex)); err = hwrng_init(rng); if (err) return err; drop_current_rng(); current_rng = rng; /* if necessary, start hwrng thread */ if (!hwrng_fill) { hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); if (IS_ERR(hwrng_fill)) { pr_err("hwrng_fill thread creation failed\n"); hwrng_fill = NULL; } } return 0; } static void drop_current_rng(void) { BUG_ON(!mutex_is_locked(&rng_mutex)); if (!current_rng) return; /* decrease last reference for triggering the cleanup */ kref_put(¤t_rng->ref, cleanup_rng); current_rng = NULL; } /* Returns ERR_PTR(), NULL or refcounted hwrng */ static struct hwrng *get_current_rng_nolock(void) { if (current_rng) kref_get(¤t_rng->ref); return current_rng; } static struct hwrng *get_current_rng(void) { struct hwrng *rng; if (mutex_lock_interruptible(&rng_mutex)) return ERR_PTR(-ERESTARTSYS); rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); return rng; } static void put_rng(struct hwrng *rng) { /* * Hold rng_mutex here so we serialize in case they set_current_rng * on rng again immediately. 
*/ mutex_lock(&rng_mutex); if (rng) kref_put(&rng->ref, cleanup_rng); mutex_unlock(&rng_mutex); } static int hwrng_init(struct hwrng *rng) { if (kref_get_unless_zero(&rng->ref)) goto skip_init; if (rng->init) { int ret; ret = rng->init(rng); if (ret) return ret; } kref_init(&rng->ref); reinit_completion(&rng->cleanup_done); skip_init: rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024); current_quality = rng->quality; /* obsolete */ return 0; } static int rng_dev_open(struct inode *inode, struct file *filp) { /* enforce read-only access to this chrdev */ if ((filp->f_mode & FMODE_READ) == 0) return -EINVAL; if (filp->f_mode & FMODE_WRITE) return -EINVAL; return 0; } static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, int wait) { int present; BUG_ON(!mutex_is_locked(&reading_mutex)); if (rng->read) return rng->read(rng, (void *)buffer, size, wait); if (rng->data_present) present = rng->data_present(rng, wait); else present = 1; if (present) return rng->data_read(rng, (u32 *)buffer); return 0; } static ssize_t rng_dev_read(struct file *filp, char __user *buf, size_t size, loff_t *offp) { u8 buffer[RNG_BUFFER_SIZE]; ssize_t ret = 0; int err = 0; int bytes_read, len; struct hwrng *rng; while (size) { rng = get_current_rng(); if (IS_ERR(rng)) { err = PTR_ERR(rng); goto out; } if (!rng) { err = -ENODEV; goto out; } if (mutex_lock_interruptible(&reading_mutex)) { err = -ERESTARTSYS; goto out_put; } if (!data_avail) { bytes_read = rng_get_data(rng, rng_buffer, rng_buffer_size(), !(filp->f_flags & O_NONBLOCK)); if (bytes_read < 0) { err = bytes_read; goto out_unlock_reading; } else if (bytes_read == 0 && (filp->f_flags & O_NONBLOCK)) { err = -EAGAIN; goto out_unlock_reading; } data_avail = bytes_read; } len = data_avail; if (len) { if (len > size) len = size; data_avail -= len; memcpy(buffer, rng_buffer + data_avail, len); } mutex_unlock(&reading_mutex); put_rng(rng); if (len) { if (copy_to_user(buf + ret, buffer, len)) { err = -EFAULT; goto out; } size -= len; ret += len; } if (need_resched()) schedule_timeout_interruptible(1); if (signal_pending(current)) { err = -ERESTARTSYS; goto out; } } out: memzero_explicit(buffer, sizeof(buffer)); return ret ? : err; out_unlock_reading: mutex_unlock(&reading_mutex); out_put: put_rng(rng); goto out; } static const struct file_operations rng_chrdev_ops = { .owner = THIS_MODULE, .open = rng_dev_open, .read = rng_dev_read, .llseek = noop_llseek, }; static const struct attribute_group *rng_dev_groups[]; static struct miscdevice rng_miscdev = { .minor = HWRNG_MINOR, .name = RNG_MODULE_NAME, .nodename = "hwrng", .fops = &rng_chrdev_ops, .groups = rng_dev_groups, }; static int enable_best_rng(void) { struct hwrng *rng, *new_rng = NULL; int ret = -ENODEV; BUG_ON(!mutex_is_locked(&rng_mutex)); /* no rng to use? */ if (list_empty(&rng_list)) { drop_current_rng(); cur_rng_set_by_user = 0; return 0; } /* use the rng which offers the best quality */ list_for_each_entry(rng, &rng_list, list) { if (!new_rng || rng->quality > new_rng->quality) new_rng = rng; } ret = ((new_rng == current_rng) ? 
0 : set_current_rng(new_rng)); if (!ret) cur_rng_set_by_user = 0; return ret; } static ssize_t rng_current_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { int err; struct hwrng *rng, *old_rng, *new_rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; old_rng = current_rng; if (sysfs_streq(buf, "")) { err = enable_best_rng(); } else { list_for_each_entry(rng, &rng_list, list) { if (sysfs_streq(rng->name, buf)) { err = set_current_rng(rng); if (!err) cur_rng_set_by_user = 1; break; } } } new_rng = get_current_rng_nolock(); mutex_unlock(&rng_mutex); if (new_rng) { if (new_rng != old_rng) add_early_randomness(new_rng); put_rng(new_rng); } return err ? : len; } static ssize_t rng_current_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng)) return PTR_ERR(rng); ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); put_rng(rng); return ret; } static ssize_t rng_available_show(struct device *dev, struct device_attribute *attr, char *buf) { int err; struct hwrng *rng; err = mutex_lock_interruptible(&rng_mutex); if (err) return -ERESTARTSYS; buf[0] = '\0'; list_for_each_entry(rng, &rng_list, list) { strlcat(buf, rng->name, PAGE_SIZE); strlcat(buf, " ", PAGE_SIZE); } strlcat(buf, "\n", PAGE_SIZE); mutex_unlock(&rng_mutex); return strlen(buf); } static ssize_t rng_selected_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", cur_rng_set_by_user); } static ssize_t rng_quality_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng)) return PTR_ERR(rng); if (!rng) /* no need to put_rng */ return -ENODEV; ret = sysfs_emit(buf, "%hu\n", rng->quality); put_rng(rng); return ret; } static ssize_t rng_quality_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { u16 quality; int ret = -EINVAL; if (len < 2) return -EINVAL; ret = mutex_lock_interruptible(&rng_mutex); if (ret) return -ERESTARTSYS; ret = kstrtou16(buf, 0, &quality); if (ret || quality > 1024) { ret = -EINVAL; goto out; } if (!current_rng) { ret = -ENODEV; goto out; } current_rng->quality = quality; current_quality = quality; /* obsolete */ /* the best available RNG may have changed */ ret = enable_best_rng(); out: mutex_unlock(&rng_mutex); return ret ? 
ret : len; } static DEVICE_ATTR_RW(rng_current); static DEVICE_ATTR_RO(rng_available); static DEVICE_ATTR_RO(rng_selected); static DEVICE_ATTR_RW(rng_quality); static struct attribute *rng_dev_attrs[] = { &dev_attr_rng_current.attr, &dev_attr_rng_available.attr, &dev_attr_rng_selected.attr, &dev_attr_rng_quality.attr, NULL }; ATTRIBUTE_GROUPS(rng_dev); static void __exit unregister_miscdev(void) { misc_deregister(&rng_miscdev); } static int __init register_miscdev(void) { return misc_register(&rng_miscdev); } static int hwrng_fillfn(void *unused) { size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */ long rc; while (!kthread_should_stop()) { unsigned short quality; struct hwrng *rng; rng = get_current_rng(); if (IS_ERR(rng) || !rng) break; mutex_lock(&reading_mutex); rc = rng_get_data(rng, rng_fillbuf, rng_buffer_size(), 1); if (current_quality != rng->quality) rng->quality = current_quality; /* obsolete */ quality = rng->quality; mutex_unlock(&reading_mutex); if (rc <= 0) hwrng_msleep(rng, 10000); put_rng(rng); if (rc <= 0) continue; /* If we cannot credit at least one bit of entropy, * keep track of the remainder for the next iteration */ entropy = rc * quality * 8 + entropy_credit; if ((entropy >> 10) == 0) entropy_credit = entropy; /* Outside lock, sure, but y'know: randomness. */ add_hwgenerator_randomness((void *)rng_fillbuf, rc, entropy >> 10, true); } hwrng_fill = NULL; return 0; } int hwrng_register(struct hwrng *rng) { int err = -EINVAL; struct hwrng *tmp; bool is_new_current = false; if (!rng->name || (!rng->data_read && !rng->read)) goto out; mutex_lock(&rng_mutex); /* Must not register two RNGs with the same name. */ err = -EEXIST; list_for_each_entry(tmp, &rng_list, list) { if (strcmp(tmp->name, rng->name) == 0) goto out_unlock; } list_add_tail(&rng->list, &rng_list); init_completion(&rng->cleanup_done); complete(&rng->cleanup_done); init_completion(&rng->dying); if (!current_rng || (!cur_rng_set_by_user && rng->quality > current_rng->quality)) { /* * Set new rng as current as the new rng source * provides better entropy quality and was not * chosen by userspace. */ err = set_current_rng(rng); if (err) goto out_unlock; /* to use current_rng in add_early_randomness() we need * to take a ref */ is_new_current = true; kref_get(&rng->ref); } mutex_unlock(&rng_mutex); if (is_new_current || !rng->init) { /* * Use a new device's input to add some randomness to * the system. 
If this rng device isn't going to be * used right away, its init function hasn't been * called yet by set_current_rng(); so only use the * randomness from devices that don't need an init callback */ add_early_randomness(rng); } if (is_new_current) put_rng(rng); return 0; out_unlock: mutex_unlock(&rng_mutex); out: return err; } EXPORT_SYMBOL_GPL(hwrng_register); void hwrng_unregister(struct hwrng *rng) { struct hwrng *old_rng, *new_rng; int err; mutex_lock(&rng_mutex); old_rng = current_rng; list_del(&rng->list); complete_all(&rng->dying); if (current_rng == rng) { err = enable_best_rng(); if (err) { drop_current_rng(); cur_rng_set_by_user = 0; } } new_rng = get_current_rng_nolock(); if (list_empty(&rng_list)) { mutex_unlock(&rng_mutex); if (hwrng_fill) kthread_stop(hwrng_fill); } else mutex_unlock(&rng_mutex); if (new_rng) { if (old_rng != new_rng) add_early_randomness(new_rng); put_rng(new_rng); } wait_for_completion(&rng->cleanup_done); } EXPORT_SYMBOL_GPL(hwrng_unregister); static void devm_hwrng_release(struct device *dev, void *res) { hwrng_unregister(*(struct hwrng **)res); } static int devm_hwrng_match(struct device *dev, void *res, void *data) { struct hwrng **r = res; if (WARN_ON(!r || !*r)) return 0; return *r == data; } int devm_hwrng_register(struct device *dev, struct hwrng *rng) { struct hwrng **ptr; int error; ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; error = hwrng_register(rng); if (error) { devres_free(ptr); return error; } *ptr = rng; devres_add(dev, ptr); return 0; } EXPORT_SYMBOL_GPL(devm_hwrng_register); void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) { devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); } EXPORT_SYMBOL_GPL(devm_hwrng_unregister); long hwrng_msleep(struct hwrng *rng, unsigned int msecs) { unsigned long timeout = msecs_to_jiffies(msecs) + 1; return wait_for_completion_interruptible_timeout(&rng->dying, timeout); } EXPORT_SYMBOL_GPL(hwrng_msleep); long hwrng_yield(struct hwrng *rng) { return wait_for_completion_interruptible_timeout(&rng->dying, 1); } EXPORT_SYMBOL_GPL(hwrng_yield); static int __init hwrng_modinit(void) { int ret; /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); if (!rng_buffer) return -ENOMEM; rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); if (!rng_fillbuf) { kfree(rng_buffer); return -ENOMEM; } ret = register_miscdev(); if (ret) { kfree(rng_fillbuf); kfree(rng_buffer); } return ret; } static void __exit hwrng_modexit(void) { mutex_lock(&rng_mutex); BUG_ON(current_rng); kfree(rng_buffer); kfree(rng_fillbuf); mutex_unlock(&rng_mutex); unregister_miscdev(); } fs_initcall(hwrng_modinit); /* depends on misc_register() */ module_exit(hwrng_modexit); MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); MODULE_LICENSE("GPL"); |
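/*
 * Illustrative sketch, not part of the core above: a minimal hw_random
 * provider built on the devm_hwrng_register() API. The device name
 * "demo-rng", the hypothetical DATA register read with readl() and the
 * quality value are assumptions for the example only. Once registered,
 * such a device shows up in the rng_available sysfs attribute and can be
 * selected through rng_current (typically under /sys/class/misc/hw_random/).
 */
#include <linux/err.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct demo_rng {
	void __iomem *base;
	struct hwrng rng;
};

/* .read callback: fill @data with up to @max bytes, return bytes produced */
static int demo_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct demo_rng *priv = container_of(rng, struct demo_rng, rng);
	u32 *buf = data;
	size_t read = 0;

	while (max - read >= sizeof(u32)) {
		*buf++ = readl(priv->base);	/* hypothetical DATA register */
		read += sizeof(u32);
	}
	return read;
}

static int demo_rng_probe(struct platform_device *pdev)
{
	struct demo_rng *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->rng.name = "demo-rng";
	priv->rng.read = demo_rng_read;
	priv->rng.quality = 900;	/* estimated entropy per 1024 bits of input */

	/* devres unregisters the rng automatically when the device unbinds */
	return devm_hwrng_register(&pdev->dev, &priv->rng);
}

static struct platform_driver demo_rng_driver = {
	.probe = demo_rng_probe,
	.driver = { .name = "demo-rng" },
};
module_platform_driver(demo_rng_driver);
MODULE_LICENSE("GPL");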
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
* Authors: David Chinner and Glauber Costa * * Generic LRU infrastructure */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/list_lru.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/memcontrol.h> #include "slab.h" #include "internal.h" #ifdef CONFIG_MEMCG_KMEM static LIST_HEAD(memcg_list_lrus); static DEFINE_MUTEX(list_lrus_mutex); static inline bool list_lru_memcg_aware(struct list_lru *lru) { return lru->memcg_aware; } static void list_lru_register(struct list_lru *lru) { if (!list_lru_memcg_aware(lru)) return; mutex_lock(&list_lrus_mutex); list_add(&lru->list, &memcg_list_lrus); mutex_unlock(&list_lrus_mutex); } static void list_lru_unregister(struct list_lru *lru) { if (!list_lru_memcg_aware(lru)) return; mutex_lock(&list_lrus_mutex); list_del(&lru->list); mutex_unlock(&list_lrus_mutex); } static int lru_shrinker_id(struct list_lru *lru) { return lru->shrinker_id; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { if (list_lru_memcg_aware(lru) && idx >= 0) { struct list_lru_memcg *mlru = xa_load(&lru->xa, idx); return mlru ? &mlru->node[nid] : NULL; } return &lru->node[nid].lru; } #else static void list_lru_register(struct list_lru *lru) { } static void list_lru_unregister(struct list_lru *lru) { } static int lru_shrinker_id(struct list_lru *lru) { return -1; } static inline bool list_lru_memcg_aware(struct list_lru *lru) { return false; } static inline struct list_lru_one * list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) { return &lru->node[nid].lru; } #endif /* CONFIG_MEMCG_KMEM */ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, struct mem_cgroup *memcg) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (list_empty(item)) { l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); list_add_tail(item, &l->list); /* Set shrinker bit if the first element was added */ if (!l->nr_items++) set_shrinker_bit(memcg, nid, lru_shrinker_id(lru)); nlru->nr_items++; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_add); bool list_lru_add_obj(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? mem_cgroup_from_slab_obj(item) : NULL; return list_lru_add(lru, item, nid, memcg); } EXPORT_SYMBOL_GPL(list_lru_add_obj); bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, struct mem_cgroup *memcg) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; spin_lock(&nlru->lock); if (!list_empty(item)) { l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); list_del_init(item); l->nr_items--; nlru->nr_items--; spin_unlock(&nlru->lock); return true; } spin_unlock(&nlru->lock); return false; } EXPORT_SYMBOL_GPL(list_lru_del); bool list_lru_del_obj(struct list_lru *lru, struct list_head *item) { int nid = page_to_nid(virt_to_page(item)); struct mem_cgroup *memcg = list_lru_memcg_aware(lru) ? 
mem_cgroup_from_slab_obj(item) : NULL; return list_lru_del(lru, item, nid, memcg); } EXPORT_SYMBOL_GPL(list_lru_del_obj); void list_lru_isolate(struct list_lru_one *list, struct list_head *item) { list_del_init(item); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate); void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head) { list_move(item, head); list->nr_items--; } EXPORT_SYMBOL_GPL(list_lru_isolate_move); void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid, struct mem_cgroup *memcg) { struct list_lru_one *list = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); if (list_empty(item)) { list_add_tail(item, &list->list); if (!list->nr_items++) set_shrinker_bit(memcg, nid, lru_shrinker_id(lru)); } } EXPORT_SYMBOL_GPL(list_lru_putback); unsigned long list_lru_count_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg) { struct list_lru_one *l; long count; rcu_read_lock(); l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg)); count = l ? READ_ONCE(l->nr_items) : 0; rcu_read_unlock(); if (unlikely(count < 0)) count = 0; return count; } EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { struct list_lru_node *nlru; nlru = &lru->node[nid]; return nlru->nr_items; } EXPORT_SYMBOL_GPL(list_lru_count_node); static unsigned long __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; struct list_lru_one *l; struct list_head *item, *n; unsigned long isolated = 0; restart: l = list_lru_from_memcg_idx(lru, nid, memcg_idx); if (!l) goto out; list_for_each_safe(item, n, &l->list) { enum lru_status ret; /* * decrement nr_to_walk first so that we don't livelock if we * get stuck on large numbers of LRU_RETRY items */ if (!*nr_to_walk) break; --*nr_to_walk; ret = isolate(item, l, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED_RETRY: assert_spin_locked(&nlru->lock); fallthrough; case LRU_REMOVED: isolated++; nlru->nr_items--; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to * restart from scratch. */ if (ret == LRU_REMOVED_RETRY) goto restart; break; case LRU_ROTATE: list_move_tail(item, &l->list); break; case LRU_SKIP: break; case LRU_RETRY: /* * The lru lock has been dropped, our list traversal is * now invalid and so we have to restart from scratch. 
*/ assert_spin_locked(&nlru->lock); goto restart; default: BUG(); } } out: return isolated; } unsigned long list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; unsigned long ret; spin_lock(&nlru->lock); ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); return ret; } EXPORT_SYMBOL_GPL(list_lru_walk_one); unsigned long list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { struct list_lru_node *nlru = &lru->node[nid]; unsigned long ret; spin_lock_irq(&nlru->lock); ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate, cb_arg, nr_to_walk); spin_unlock_irq(&nlru->lock); return ret; } unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk) { long isolated = 0; isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg, nr_to_walk); #ifdef CONFIG_MEMCG_KMEM if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { struct list_lru_memcg *mlru; unsigned long index; xa_for_each(&lru->xa, index, mlru) { struct list_lru_node *nlru = &lru->node[nid]; spin_lock(&nlru->lock); isolated += __list_lru_walk_one(lru, nid, index, isolate, cb_arg, nr_to_walk); spin_unlock(&nlru->lock); if (*nr_to_walk <= 0) break; } } #endif return isolated; } EXPORT_SYMBOL_GPL(list_lru_walk_node); static void init_one_lru(struct list_lru_one *l) { INIT_LIST_HEAD(&l->list); l->nr_items = 0; } #ifdef CONFIG_MEMCG_KMEM static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp) { int nid; struct list_lru_memcg *mlru; mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); if (!mlru) return NULL; for_each_node(nid) init_one_lru(&mlru->node[nid]); return mlru; } static void memcg_list_lru_free(struct list_lru *lru, int src_idx) { struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); /* * The __list_lru_walk_one() can walk the list of this node. * We need kvfree_rcu() here. And the walking of the list * is under lru->node[nid]->lock, which can serve as a RCU * read-side critical section. */ if (mlru) kvfree_rcu(mlru, rcu); } static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { if (memcg_aware) xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ); lru->memcg_aware = memcg_aware; } static void memcg_destroy_list_lru(struct list_lru *lru) { XA_STATE(xas, &lru->xa, 0); struct list_lru_memcg *mlru; if (!list_lru_memcg_aware(lru)) return; xas_lock_irq(&xas); xas_for_each(&xas, mlru, ULONG_MAX) { kfree(mlru); xas_store(&xas, NULL); } xas_unlock_irq(&xas); } static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid, int src_idx, struct mem_cgroup *dst_memcg) { struct list_lru_node *nlru = &lru->node[nid]; int dst_idx = dst_memcg->kmemcg_id; struct list_lru_one *src, *dst; /* * Since list_lru_{add,del} may be called under an IRQ-safe lock, * we have to use IRQ-safe primitives here to avoid deadlock. 
*/ spin_lock_irq(&nlru->lock); src = list_lru_from_memcg_idx(lru, nid, src_idx); if (!src) goto out; dst = list_lru_from_memcg_idx(lru, nid, dst_idx); list_splice_init(&src->list, &dst->list); if (src->nr_items) { dst->nr_items += src->nr_items; set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru)); src->nr_items = 0; } out: spin_unlock_irq(&nlru->lock); } static void memcg_reparent_list_lru(struct list_lru *lru, int src_idx, struct mem_cgroup *dst_memcg) { int i; for_each_node(i) memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg); memcg_list_lru_free(lru, src_idx); } void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent) { struct cgroup_subsys_state *css; struct list_lru *lru; int src_idx = memcg->kmemcg_id; /* * Change kmemcg_id of this cgroup and all its descendants to the * parent's id, and then move all entries from this cgroup's list_lrus * to ones of the parent. * * After we have finished, all list_lrus corresponding to this cgroup * are guaranteed to remain empty. So we can safely free this cgroup's * list lrus in memcg_list_lru_free(). * * Changing ->kmemcg_id to the parent can prevent memcg_list_lru_alloc() * from allocating list lrus for this cgroup after memcg_list_lru_free() * call. */ rcu_read_lock(); css_for_each_descendant_pre(css, &memcg->css) { struct mem_cgroup *child; child = mem_cgroup_from_css(css); WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id); } rcu_read_unlock(); mutex_lock(&list_lrus_mutex); list_for_each_entry(lru, &memcg_list_lrus, list) memcg_reparent_list_lru(lru, src_idx, parent); mutex_unlock(&list_lrus_mutex); } static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg, struct list_lru *lru) { int idx = memcg->kmemcg_id; return idx < 0 || xa_load(&lru->xa, idx); } int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp) { int i; unsigned long flags; struct list_lru_memcg_table { struct list_lru_memcg *mlru; struct mem_cgroup *memcg; } *table; XA_STATE(xas, &lru->xa, 0); if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru)) return 0; gfp &= GFP_RECLAIM_MASK; table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp); if (!table) return -ENOMEM; /* * Because the list_lru can be reparented to the parent cgroup's * list_lru, we should make sure that this cgroup and all its * ancestors have allocated list_lru_memcg. */ for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { if (memcg_list_lru_allocated(memcg, lru)) break; table[i].memcg = memcg; table[i].mlru = memcg_init_list_lru_one(gfp); if (!table[i].mlru) { while (i--) kfree(table[i].mlru); kfree(table); return -ENOMEM; } } xas_lock_irqsave(&xas, flags); while (i--) { int index = READ_ONCE(table[i].memcg->kmemcg_id); struct list_lru_memcg *mlru = table[i].mlru; xas_set(&xas, index); retry: if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) { kfree(mlru); } else { xas_store(&xas, mlru); if (xas_error(&xas) == -ENOMEM) { xas_unlock_irqrestore(&xas, flags); if (xas_nomem(&xas, gfp)) xas_set_err(&xas, 0); xas_lock_irqsave(&xas, flags); /* * The xas lock has been released, this memcg * can be reparented before us. So reload * memcg id. More details see the comments * in memcg_reparent_list_lrus(). */ index = READ_ONCE(table[i].memcg->kmemcg_id); if (index < 0) xas_set_err(&xas, 0); else if (!xas_error(&xas) && index != xas.xa_index) xas_set(&xas, index); goto retry; } } } /* xas_nomem() is used to free memory instead of memory allocation. 
*/ if (xas.xa_alloc) xas_nomem(&xas, gfp); xas_unlock_irqrestore(&xas, flags); kfree(table); return xas_error(&xas); } #else static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware) { } static void memcg_destroy_list_lru(struct list_lru *lru) { } #endif /* CONFIG_MEMCG_KMEM */ int __list_lru_init(struct list_lru *lru, bool memcg_aware, struct lock_class_key *key, struct shrinker *shrinker) { int i; #ifdef CONFIG_MEMCG_KMEM if (shrinker) lru->shrinker_id = shrinker->id; else lru->shrinker_id = -1; #endif lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); if (!lru->node) return -ENOMEM; for_each_node(i) { spin_lock_init(&lru->node[i].lock); if (key) lockdep_set_class(&lru->node[i].lock, key); init_one_lru(&lru->node[i].lru); } memcg_init_list_lru(lru, memcg_aware); list_lru_register(lru); return 0; } EXPORT_SYMBOL_GPL(__list_lru_init); void list_lru_destroy(struct list_lru *lru) { /* Already destroyed or not yet initialized? */ if (!lru->node) return; list_lru_unregister(lru); memcg_destroy_list_lru(lru); kfree(lru->node); lru->node = NULL; #ifdef CONFIG_MEMCG_KMEM lru->shrinker_id = -1; #endif } EXPORT_SYMBOL_GPL(list_lru_destroy); |
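/*
 * Illustrative sketch, not part of list_lru itself: a toy cache that parks
 * idle objects on a list_lru and reclaims them with list_lru_walk_node().
 * The demo_obj layout, the reclaim batch size and the policy of freeing
 * everything the walk visits are assumptions for the example; only the
 * list_lru calls mirror the API implemented above.
 */
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct list_head lru;	/* must be initialised (empty) before use */
	unsigned long payload;
};

static struct list_lru demo_lru;

static int __init demo_cache_init(void)
{
	/* not memcg aware, no lockdep class key, no shrinker */
	return __list_lru_init(&demo_lru, false, NULL, NULL);
}

/* Park an object that became idle; nid/memcg are derived from its address. */
static void demo_obj_make_idle(struct demo_obj *obj)
{
	list_lru_add_obj(&demo_lru, &obj->lru);
}

/* Reuse an idle object: take it back off the lru before touching it. */
static void demo_obj_reuse(struct demo_obj *obj)
{
	list_lru_del_obj(&demo_lru, &obj->lru);
}

/* Called with the per-node lru lock held, as described in the walker above. */
static enum lru_status demo_isolate(struct list_head *item,
				    struct list_lru_one *list,
				    spinlock_t *lock, void *cb_arg)
{
	struct demo_obj *obj = container_of(item, struct demo_obj, lru);

	list_lru_isolate(list, item);	/* drops it from the lru accounting */
	kfree(obj);
	return LRU_REMOVED;
}

/* Reclaim up to 128 idle objects from one NUMA node. */
static unsigned long demo_cache_shrink(int nid)
{
	unsigned long nr_to_walk = 128;

	return list_lru_walk_node(&demo_lru, nid, demo_isolate, NULL,
				  &nr_to_walk);
}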
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010 IBM Corporation * Copyright (C) 2010 Politecnico di Torino, Italy * TORSEC group -- https://security.polito.it * * Authors: * Mimi Zohar <zohar@us.ibm.com> * Roberto Sassu <roberto.sassu@polito.it> * * See Documentation/security/keys/trusted-encrypted.rst */ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <keys/encrypted-type.h> #include <linux/key-type.h> #include <linux/random.h> #include <linux/rcupdate.h> #include <linux/scatterlist.h> #include <linux/ctype.h> #include <crypto/aes.h> #include <crypto/hash.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> #include <crypto/utils.h> #include "encrypted.h" #include "ecryptfs_format.h" static const char KEY_TRUSTED_PREFIX[] = "trusted:"; static const char KEY_USER_PREFIX[] = "user:"; static const char hash_alg[] = "sha256"; static const char hmac_alg[] = "hmac(sha256)"; static const char blkcipher_alg[] = "cbc(aes)"; static const char key_format_default[] = "default"; static const char key_format_ecryptfs[] = "ecryptfs"; static const char key_format_enc32[] = "enc32"; static unsigned int ivsize; static int blksize; #define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1) #define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1) #define KEY_ECRYPTFS_DESC_LEN 16 #define HASH_SIZE SHA256_DIGEST_SIZE #define MAX_DATA_SIZE 4096 #define MIN_DATA_SIZE 20 #define KEY_ENC32_PAYLOAD_LEN 32 static struct crypto_shash *hash_tfm; enum { Opt_new, Opt_load, Opt_update, Opt_err }; enum { Opt_default, Opt_ecryptfs, Opt_enc32, Opt_error }; static const match_table_t key_format_tokens = { {Opt_default, "default"}, {Opt_ecryptfs, "ecryptfs"}, {Opt_enc32, "enc32"}, {Opt_error, NULL} }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_err, NULL} }; static bool user_decrypted_data = IS_ENABLED(CONFIG_USER_DECRYPTED_DATA); module_param(user_decrypted_data, bool, 0); MODULE_PARM_DESC(user_decrypted_data, "Allow instantiation of encrypted keys using provided decrypted data"); static int aes_get_sizes(void) { struct crypto_skcipher *tfm; tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { pr_err("encrypted_key: failed to alloc_cipher (%ld)\n", PTR_ERR(tfm)); return PTR_ERR(tfm); } ivsize = crypto_skcipher_ivsize(tfm); blksize = crypto_skcipher_blocksize(tfm); crypto_free_skcipher(tfm); return 0; } /* * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key * * The description of an encrypted key with format 'ecryptfs' must contain * exactly 16 hexadecimal characters.
* */ static int valid_ecryptfs_desc(const char *ecryptfs_desc) { int i; if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) { pr_err("encrypted_key: key description must be %d hexadecimal " "characters long\n", KEY_ECRYPTFS_DESC_LEN); return -EINVAL; } for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) { if (!isxdigit(ecryptfs_desc[i])) { pr_err("encrypted_key: key description must contain " "only hexadecimal characters\n"); return -EINVAL; } } return 0; } /* * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key * * key-type:= "trusted:" | "user:" * desc:= master-key description * * Verify that 'key-type' is valid and that 'desc' exists. On key update, * only the master key description is permitted to change, not the key-type. * The key-type remains constant. * * On success returns 0, otherwise -EINVAL. */ static int valid_master_desc(const char *new_desc, const char *orig_desc) { int prefix_len; if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) prefix_len = KEY_TRUSTED_PREFIX_LEN; else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) prefix_len = KEY_USER_PREFIX_LEN; else return -EINVAL; if (!new_desc[prefix_len]) return -EINVAL; if (orig_desc && strncmp(new_desc, orig_desc, prefix_len)) return -EINVAL; return 0; } /* * datablob_parse - parse the keyctl data * * datablob format: * new [<format>] <master-key name> <decrypted data length> [<decrypted data>] * load [<format>] <master-key name> <decrypted data length> * <encrypted iv + data> * update <new-master-key name> * * Tokenizes a copy of the keyctl data, returning a pointer to each token, * which is null terminated. * * On success returns 0, otherwise -EINVAL. */ static int datablob_parse(char *datablob, const char **format, char **master_desc, char **decrypted_datalen, char **hex_encoded_iv, char **decrypted_data) { substring_t args[MAX_OPT_ARGS]; int ret = -EINVAL; int key_cmd; int key_format; char *p, *keyword; keyword = strsep(&datablob, " \t"); if (!keyword) { pr_info("encrypted_key: insufficient parameters specified\n"); return ret; } key_cmd = match_token(keyword, key_tokens, args); /* Get optional format: default | ecryptfs */ p = strsep(&datablob, " \t"); if (!p) { pr_err("encrypted_key: insufficient parameters specified\n"); return ret; } key_format = match_token(p, key_format_tokens, args); switch (key_format) { case Opt_ecryptfs: case Opt_enc32: case Opt_default: *format = p; *master_desc = strsep(&datablob, " \t"); break; case Opt_error: *master_desc = p; break; } if (!*master_desc) { pr_info("encrypted_key: master key parameter is missing\n"); goto out; } if (valid_master_desc(*master_desc, NULL) < 0) { pr_info("encrypted_key: master key parameter \'%s\' " "is invalid\n", *master_desc); goto out; } if (decrypted_datalen) { *decrypted_datalen = strsep(&datablob, " \t"); if (!*decrypted_datalen) { pr_info("encrypted_key: keylen parameter is missing\n"); goto out; } } switch (key_cmd) { case Opt_new: if (!decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when called from .update method\n", keyword); break; } *decrypted_data = strsep(&datablob, " \t"); ret = 0; break; case Opt_load: if (!decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when called from .update method\n", keyword); break; } *hex_encoded_iv = strsep(&datablob, " \t"); if (!*hex_encoded_iv) { pr_info("encrypted_key: hex blob is missing\n"); break; } ret = 0; break; case Opt_update: if (decrypted_datalen) { pr_info("encrypted_key: keyword \'%s\' not allowed " "when 
called from .instantiate method\n", keyword); break; } ret = 0; break; case Opt_err: pr_info("encrypted_key: keyword \'%s\' not recognized\n", keyword); break; } out: return ret; } /* * datablob_format - format as an ascii string, before copying to userspace */ static char *datablob_format(struct encrypted_key_payload *epayload, size_t asciiblob_len) { char *ascii_buf, *bufp; u8 *iv = epayload->iv; int len; int i; ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL); if (!ascii_buf) goto out; ascii_buf[asciiblob_len] = '\0'; /* copy datablob master_desc and datalen strings */ len = sprintf(ascii_buf, "%s %s %s ", epayload->format, epayload->master_desc, epayload->datalen); /* convert the hex encoded iv, encrypted-data and HMAC to ascii */ bufp = &ascii_buf[len]; for (i = 0; i < (asciiblob_len - len) / 2; i++) bufp = hex_byte_pack(bufp, iv[i]); out: return ascii_buf; } /* * request_user_key - request the user key * * Use a user provided key to encrypt/decrypt an encrypted-key. */ static struct key *request_user_key(const char *master_desc, const u8 **master_key, size_t *master_keylen) { const struct user_key_payload *upayload; struct key *ukey; ukey = request_key(&key_type_user, master_desc, NULL); if (IS_ERR(ukey)) goto error; down_read(&ukey->sem); upayload = user_key_payload_locked(ukey); if (!upayload) { /* key was revoked before we acquired its semaphore */ up_read(&ukey->sem); key_put(ukey); ukey = ERR_PTR(-EKEYREVOKED); goto error; } *master_key = upayload->data; *master_keylen = upayload->datalen; error: return ukey; } static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, const u8 *buf, unsigned int buflen) { struct crypto_shash *tfm; int err; tfm = crypto_alloc_shash(hmac_alg, 0, 0); if (IS_ERR(tfm)) { pr_err("encrypted_key: can't alloc %s transform: %ld\n", hmac_alg, PTR_ERR(tfm)); return PTR_ERR(tfm); } err = crypto_shash_setkey(tfm, key, keylen); if (!err) err = crypto_shash_tfm_digest(tfm, buf, buflen, digest); crypto_free_shash(tfm); return err; } enum derived_key_type { ENC_KEY, AUTH_KEY }; /* Derive authentication/encryption key from trusted key */ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, const u8 *master_key, size_t master_keylen) { u8 *derived_buf; unsigned int derived_buf_len; int ret; derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen; if (derived_buf_len < HASH_SIZE) derived_buf_len = HASH_SIZE; derived_buf = kzalloc(derived_buf_len, GFP_KERNEL); if (!derived_buf) return -ENOMEM; if (key_type) strcpy(derived_buf, "AUTH_KEY"); else strcpy(derived_buf, "ENC_KEY"); memcpy(derived_buf + strlen(derived_buf) + 1, master_key, master_keylen); ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len, derived_key); kfree_sensitive(derived_buf); return ret; } static struct skcipher_request *init_skcipher_req(const u8 *key, unsigned int key_len) { struct skcipher_request *req; struct crypto_skcipher *tfm; int ret; tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { pr_err("encrypted_key: failed to load %s transform (%ld)\n", blkcipher_alg, PTR_ERR(tfm)); return ERR_CAST(tfm); } ret = crypto_skcipher_setkey(tfm, key, key_len); if (ret < 0) { pr_err("encrypted_key: failed to setkey (%d)\n", ret); crypto_free_skcipher(tfm); return ERR_PTR(ret); } req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { pr_err("encrypted_key: failed to allocate request for %s\n", blkcipher_alg); crypto_free_skcipher(tfm); return ERR_PTR(-ENOMEM); } skcipher_request_set_callback(req, 0, NULL, NULL); 
return req; } static struct key *request_master_key(struct encrypted_key_payload *epayload, const u8 **master_key, size_t *master_keylen) { struct key *mkey = ERR_PTR(-EINVAL); if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { mkey = request_trusted_key(epayload->master_desc + KEY_TRUSTED_PREFIX_LEN, master_key, master_keylen); } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { mkey = request_user_key(epayload->master_desc + KEY_USER_PREFIX_LEN, master_key, master_keylen); } else goto out; if (IS_ERR(mkey)) { int ret = PTR_ERR(mkey); if (ret == -ENOTSUPP) pr_info("encrypted_key: key %s not supported", epayload->master_desc); else pr_info("encrypted_key: key %s not found", epayload->master_desc); goto out; } dump_master_key(*master_key, *master_keylen); out: return mkey; } /* Before returning data to userspace, encrypt decrypted data. */ static int derived_key_encrypt(struct encrypted_key_payload *epayload, const u8 *derived_key, unsigned int derived_keylen) { struct scatterlist sg_in[2]; struct scatterlist sg_out[1]; struct crypto_skcipher *tfm; struct skcipher_request *req; unsigned int encrypted_datalen; u8 iv[AES_BLOCK_SIZE]; int ret; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); req = init_skcipher_req(derived_key, derived_keylen); ret = PTR_ERR(req); if (IS_ERR(req)) goto out; dump_decrypted_data(epayload); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], epayload->decrypted_data, epayload->decrypted_datalen); sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0); sg_init_table(sg_out, 1); sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); memcpy(iv, epayload->iv, sizeof(iv)); skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); ret = crypto_skcipher_encrypt(req); tfm = crypto_skcipher_reqtfm(req); skcipher_request_free(req); crypto_free_skcipher(tfm); if (ret < 0) pr_err("encrypted_key: failed to encrypt (%d)\n", ret); else dump_encrypted_data(epayload, encrypted_datalen); out: return ret; } static int datablob_hmac_append(struct encrypted_key_payload *epayload, const u8 *master_key, size_t master_keylen) { u8 derived_key[HASH_SIZE]; u8 *digest; int ret; ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); if (ret < 0) goto out; digest = epayload->format + epayload->datablob_len; ret = calc_hmac(digest, derived_key, sizeof derived_key, epayload->format, epayload->datablob_len); if (!ret) dump_hmac(NULL, digest, HASH_SIZE); out: memzero_explicit(derived_key, sizeof(derived_key)); return ret; } /* verify HMAC before decrypting encrypted key */ static int datablob_hmac_verify(struct encrypted_key_payload *epayload, const u8 *format, const u8 *master_key, size_t master_keylen) { u8 derived_key[HASH_SIZE]; u8 digest[HASH_SIZE]; int ret; char *p; unsigned short len; ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); if (ret < 0) goto out; len = epayload->datablob_len; if (!format) { p = epayload->master_desc; len -= strlen(epayload->format) + 1; } else p = epayload->format; ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); if (ret < 0) goto out; ret = crypto_memneq(digest, epayload->format + epayload->datablob_len, sizeof(digest)); if (ret) { ret = -EINVAL; dump_hmac("datablob", epayload->format + epayload->datablob_len, HASH_SIZE); dump_hmac("calc", digest, HASH_SIZE); } out: memzero_explicit(derived_key, sizeof(derived_key)); return ret; } static int derived_key_decrypt(struct encrypted_key_payload *epayload, 
const u8 *derived_key, unsigned int derived_keylen) { struct scatterlist sg_in[1]; struct scatterlist sg_out[2]; struct crypto_skcipher *tfm; struct skcipher_request *req; unsigned int encrypted_datalen; u8 iv[AES_BLOCK_SIZE]; u8 *pad; int ret; /* Throwaway buffer to hold the unused zero padding at the end */ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL); if (!pad) return -ENOMEM; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); req = init_skcipher_req(derived_key, derived_keylen); ret = PTR_ERR(req); if (IS_ERR(req)) goto out; dump_encrypted_data(epayload, encrypted_datalen); sg_init_table(sg_in, 1); sg_init_table(sg_out, 2); sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); sg_set_buf(&sg_out[0], epayload->decrypted_data, epayload->decrypted_datalen); sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE); memcpy(iv, epayload->iv, sizeof(iv)); skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); ret = crypto_skcipher_decrypt(req); tfm = crypto_skcipher_reqtfm(req); skcipher_request_free(req); crypto_free_skcipher(tfm); if (ret < 0) goto out; dump_decrypted_data(epayload); out: kfree(pad); return ret; } /* Allocate memory for decrypted key and datablob. */ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, const char *format, const char *master_desc, const char *datalen, const char *decrypted_data) { struct encrypted_key_payload *epayload = NULL; unsigned short datablob_len; unsigned short decrypted_datalen; unsigned short payload_datalen; unsigned int encrypted_datalen; unsigned int format_len; long dlen; int i; int ret; ret = kstrtol(datalen, 10, &dlen); if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) return ERR_PTR(-EINVAL); format_len = (!format) ? strlen(key_format_default) : strlen(format); decrypted_datalen = dlen; payload_datalen = decrypted_datalen; if (decrypted_data) { if (!user_decrypted_data) { pr_err("encrypted key: instantiation of keys using provided decrypted data is disabled since CONFIG_USER_DECRYPTED_DATA is set to false\n"); return ERR_PTR(-EINVAL); } if (strlen(decrypted_data) != decrypted_datalen * 2) { pr_err("encrypted key: decrypted data provided does not match decrypted data length provided\n"); return ERR_PTR(-EINVAL); } for (i = 0; i < strlen(decrypted_data); i++) { if (!isxdigit(decrypted_data[i])) { pr_err("encrypted key: decrypted data provided must contain only hexadecimal characters\n"); return ERR_PTR(-EINVAL); } } } if (format) { if (!strcmp(format, key_format_ecryptfs)) { if (dlen != ECRYPTFS_MAX_KEY_BYTES) { pr_err("encrypted_key: keylen for the ecryptfs format must be equal to %d bytes\n", ECRYPTFS_MAX_KEY_BYTES); return ERR_PTR(-EINVAL); } decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES; payload_datalen = sizeof(struct ecryptfs_auth_tok); } else if (!strcmp(format, key_format_enc32)) { if (decrypted_datalen != KEY_ENC32_PAYLOAD_LEN) { pr_err("encrypted_key: enc32 key payload incorrect length: %d\n", decrypted_datalen); return ERR_PTR(-EINVAL); } } } encrypted_datalen = roundup(decrypted_datalen, blksize); datablob_len = format_len + 1 + strlen(master_desc) + 1 + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen; ret = key_payload_reserve(key, payload_datalen + datablob_len + HASH_SIZE + 1); if (ret < 0) return ERR_PTR(ret); epayload = kzalloc(sizeof(*epayload) + payload_datalen + datablob_len + HASH_SIZE + 1, GFP_KERNEL); if (!epayload) return ERR_PTR(-ENOMEM); epayload->payload_datalen = payload_datalen; epayload->decrypted_datalen = decrypted_datalen; epayload->datablob_len = 
datablob_len; return epayload; } static int encrypted_key_decrypt(struct encrypted_key_payload *epayload, const char *format, const char *hex_encoded_iv) { struct key *mkey; u8 derived_key[HASH_SIZE]; const u8 *master_key; u8 *hmac; const char *hex_encoded_data; unsigned int encrypted_datalen; size_t master_keylen; size_t asciilen; int ret; encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2; if (strlen(hex_encoded_iv) != asciilen) return -EINVAL; hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2; ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize); if (ret < 0) return -EINVAL; ret = hex2bin(epayload->encrypted_data, hex_encoded_data, encrypted_datalen); if (ret < 0) return -EINVAL; hmac = epayload->format + epayload->datablob_len; ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), HASH_SIZE); if (ret < 0) return -EINVAL; mkey = request_master_key(epayload, &master_key, &master_keylen); if (IS_ERR(mkey)) return PTR_ERR(mkey); ret = datablob_hmac_verify(epayload, format, master_key, master_keylen); if (ret < 0) { pr_err("encrypted_key: bad hmac (%d)\n", ret); goto out; } ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); if (ret < 0) goto out; ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key); if (ret < 0) pr_err("encrypted_key: failed to decrypt key (%d)\n", ret); out: up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); return ret; } static void __ekey_init(struct encrypted_key_payload *epayload, const char *format, const char *master_desc, const char *datalen) { unsigned int format_len; format_len = (!format) ? strlen(key_format_default) : strlen(format); epayload->format = epayload->payload_data + epayload->payload_datalen; epayload->master_desc = epayload->format + format_len + 1; epayload->datalen = epayload->master_desc + strlen(master_desc) + 1; epayload->iv = epayload->datalen + strlen(datalen) + 1; epayload->encrypted_data = epayload->iv + ivsize + 1; epayload->decrypted_data = epayload->payload_data; if (!format) memcpy(epayload->format, key_format_default, format_len); else { if (!strcmp(format, key_format_ecryptfs)) epayload->decrypted_data = ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data); memcpy(epayload->format, format, format_len); } memcpy(epayload->master_desc, master_desc, strlen(master_desc)); memcpy(epayload->datalen, datalen, strlen(datalen)); } /* * encrypted_init - initialize an encrypted key * * For a new key, use either a random number or user-provided decrypted data in * case it is provided. A random number is used for the iv in both cases. For * an old key, decrypt the hex encoded data. 
*/ static int encrypted_init(struct encrypted_key_payload *epayload, const char *key_desc, const char *format, const char *master_desc, const char *datalen, const char *hex_encoded_iv, const char *decrypted_data) { int ret = 0; if (format && !strcmp(format, key_format_ecryptfs)) { ret = valid_ecryptfs_desc(key_desc); if (ret < 0) return ret; ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data, key_desc); } __ekey_init(epayload, format, master_desc, datalen); if (hex_encoded_iv) { ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv); } else if (decrypted_data) { get_random_bytes(epayload->iv, ivsize); ret = hex2bin(epayload->decrypted_data, decrypted_data, epayload->decrypted_datalen); } else { get_random_bytes(epayload->iv, ivsize); get_random_bytes(epayload->decrypted_data, epayload->decrypted_datalen); } return ret; } /* * encrypted_instantiate - instantiate an encrypted key * * Instantiates the key: * - by decrypting an existing encrypted datablob, or * - by creating a new encrypted key based on a kernel random number, or * - using provided decrypted data. * * On success, return 0. Otherwise return errno. */ static int encrypted_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = NULL; char *datablob = NULL; const char *format = NULL; char *master_desc = NULL; char *decrypted_datalen = NULL; char *hex_encoded_iv = NULL; char *decrypted_data = NULL; size_t datalen = prep->datalen; int ret; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; datablob[datalen] = 0; memcpy(datablob, prep->data, datalen); ret = datablob_parse(datablob, &format, &master_desc, &decrypted_datalen, &hex_encoded_iv, &decrypted_data); if (ret < 0) goto out; epayload = encrypted_key_alloc(key, format, master_desc, decrypted_datalen, decrypted_data); if (IS_ERR(epayload)) { ret = PTR_ERR(epayload); goto out; } ret = encrypted_init(epayload, key->description, format, master_desc, decrypted_datalen, hex_encoded_iv, decrypted_data); if (ret < 0) { kfree_sensitive(epayload); goto out; } rcu_assign_keypointer(key, epayload); out: kfree_sensitive(datablob); return ret; } static void encrypted_rcu_free(struct rcu_head *rcu) { struct encrypted_key_payload *epayload; epayload = container_of(rcu, struct encrypted_key_payload, rcu); kfree_sensitive(epayload); } /* * encrypted_update - update the master key description * * Change the master key description for an existing encrypted key. * The next read will return an encrypted datablob using the new * master key description. * * On success, return 0. Otherwise return errno. 
*/ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) { struct encrypted_key_payload *epayload = key->payload.data[0]; struct encrypted_key_payload *new_epayload; char *buf; char *new_master_desc = NULL; const char *format = NULL; size_t datalen = prep->datalen; int ret = 0; if (key_is_negative(key)) return -ENOKEY; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; buf = kmalloc(datalen + 1, GFP_KERNEL); if (!buf) return -ENOMEM; buf[datalen] = 0; memcpy(buf, prep->data, datalen); ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL, NULL); if (ret < 0) goto out; ret = valid_master_desc(new_master_desc, epayload->master_desc); if (ret < 0) goto out; new_epayload = encrypted_key_alloc(key, epayload->format, new_master_desc, epayload->datalen, NULL); if (IS_ERR(new_epayload)) { ret = PTR_ERR(new_epayload); goto out; } __ekey_init(new_epayload, epayload->format, new_master_desc, epayload->datalen); memcpy(new_epayload->iv, epayload->iv, ivsize); memcpy(new_epayload->payload_data, epayload->payload_data, epayload->payload_datalen); rcu_assign_keypointer(key, new_epayload); call_rcu(&epayload->rcu, encrypted_rcu_free); out: kfree_sensitive(buf); return ret; } /* * encrypted_read - format and copy out the encrypted data * * The resulting datablob format is: * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> * * On success, return to userspace the encrypted key datablob size. */ static long encrypted_read(const struct key *key, char *buffer, size_t buflen) { struct encrypted_key_payload *epayload; struct key *mkey; const u8 *master_key; size_t master_keylen; char derived_key[HASH_SIZE]; char *ascii_buf; size_t asciiblob_len; int ret; epayload = dereference_key_locked(key); /* returns the hex encoded iv, encrypted-data, and hmac as ascii */ asciiblob_len = epayload->datablob_len + ivsize + 1 + roundup(epayload->decrypted_datalen, blksize) + (HASH_SIZE * 2); if (!buffer || buflen < asciiblob_len) return asciiblob_len; mkey = request_master_key(epayload, &master_key, &master_keylen); if (IS_ERR(mkey)) return PTR_ERR(mkey); ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); if (ret < 0) goto out; ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key); if (ret < 0) goto out; ret = datablob_hmac_append(epayload, master_key, master_keylen); if (ret < 0) goto out; ascii_buf = datablob_format(epayload, asciiblob_len); if (!ascii_buf) { ret = -ENOMEM; goto out; } up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); memcpy(buffer, ascii_buf, asciiblob_len); kfree_sensitive(ascii_buf); return asciiblob_len; out: up_read(&mkey->sem); key_put(mkey); memzero_explicit(derived_key, sizeof(derived_key)); return ret; } /* * encrypted_destroy - clear and free the key's payload */ static void encrypted_destroy(struct key *key) { kfree_sensitive(key->payload.data[0]); } struct key_type key_type_encrypted = { .name = "encrypted", .instantiate = encrypted_instantiate, .update = encrypted_update, .destroy = encrypted_destroy, .describe = user_describe, .read = encrypted_read, }; EXPORT_SYMBOL_GPL(key_type_encrypted); static int __init init_encrypted(void) { int ret; hash_tfm = crypto_alloc_shash(hash_alg, 0, 0); if (IS_ERR(hash_tfm)) { pr_err("encrypted_key: can't allocate %s transform: %ld\n", hash_alg, PTR_ERR(hash_tfm)); return PTR_ERR(hash_tfm); } ret = aes_get_sizes(); if (ret < 0) goto out; ret = register_key_type(&key_type_encrypted); if (ret < 0) goto out; 
return 0; out: crypto_free_shash(hash_tfm); return ret; } static void __exit cleanup_encrypted(void) { crypto_free_shash(hash_tfm); unregister_key_type(&key_type_encrypted); } late_initcall(init_encrypted); module_exit(cleanup_encrypted); MODULE_LICENSE("GPL");
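/*
 * Illustrative sketch, not part of the key type above: creating an
 * "encrypted" key from userspace with add_key(2)/libkeyutils, using the
 * "new [<format>] <master-key name> <decrypted data length>" payload that
 * datablob_parse() accepts. The key description "evm" and the master key
 * "trusted:kmk" are assumptions for the example; a trusted key named "kmk"
 * must already be loaded for instantiation to succeed.
 */
#include <stdio.h>
#include <string.h>
#include <keyutils.h>

int main(void)
{
	/* 32 bytes of kernel-generated random data, sealed to trusted:kmk */
	const char *payload = "new default trusted:kmk 32";
	key_serial_t key;

	key = add_key("encrypted", "evm", payload, strlen(payload),
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* "keyctl print <id>" (i.e. the .read op) returns the sealed blob */
	printf("encrypted key id: %d\n", key);
	return 0;
}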
/* * llc_c_ac.c - actions performed during connection state transition. * * Description: * Functions in this module are implementations of connection component actions. * Details of the actions can be found in the IEEE-802.2 standard document. * All functions take one connection and one event as input arguments. All of * them return 0 on success and 1 otherwise. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details.
*/ #include <linux/netdevice.h> #include <linux/slab.h> #include <net/llc_conn.h> #include <net/llc_sap.h> #include <net/sock.h> #include <net/llc_c_ev.h> #include <net/llc_c_ac.h> #include <net/llc_c_st.h> #include <net/llc_pdu.h> #include <net/llc.h> static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb); static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev); static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb); #define INCORRECT 0 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc->remote_busy_flag = 0; del_timer(&llc->busy_state_timer.timer); nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return 0; } int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->ind_prim = LLC_CONN_PRIM; return 0; } int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_CONN_PRIM; return 0; } static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_DATA_PRIM; return 0; } int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb) { llc_conn_rtn_pdu(sk, skb); return 0; } int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); u8 reason = 0; int rc = 0; if (ev->type == LLC_CONN_EV_TYPE_PDU) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM) reason = LLC_DISC_REASON_RX_DM_RSP_PDU; else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC) reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) reason = LLC_DISC_REASON_ACK_TMR_EXP; else rc = -EINVAL; if (!rc) { ev->reason = reason; ev->ind_prim = LLC_DISC_PRIM; } return rc; } int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = ev->status; ev->cfm_prim = LLC_DISC_PRIM; return 0; } int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) { u8 reason = 0; int rc = 1; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); struct llc_sock *llc = llc_sk(sk); switch (ev->type) { case LLC_CONN_EV_TYPE_PDU: if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) { reason = LLC_RESET_REASON_REMOTE; rc = 0; } break; case LLC_CONN_EV_TYPE_ACK_TMR: case LLC_CONN_EV_TYPE_P_TMR: case LLC_CONN_EV_TYPE_REJ_TMR: case LLC_CONN_EV_TYPE_BUSY_TMR: if (llc->retry_count > llc->n2) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } break; } if (!rc) { ev->reason = reason; ev->ind_prim = LLC_RESET_PRIM; } return rc; } int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = 0; ev->cfm_prim = LLC_RESET_PRIM; return 0; } int 
llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && llc_sk(sk)->ack_pf) llc_conn_ac_clear_remote_busy(sk, skb); return 0; } int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->data_flag == 2) del_timer(&llc->rej_sent_timer.timer); return 0; } int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_disc_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_ac_set_p_flag_1(sk, skb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = llc_sk(sk); llc->rx_pdu_hdr = *((u32 *)pdu); if (LLC_PDU_IS_CMD(pdu)) llc_pdu_decode_pf_bit(skb, &f_bit); else f_bit = 0; nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if 
(unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &f_bit); nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return 0; } int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); return 0; } int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, struct sk_buff *skb) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) llc_conn_send_pdu(sk, nskb); else kfree_skb(skb); } if (rc) { nr = LLC_I_GET_NR(pdu); rc = 0; llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return rc; } int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_rsp(sk, nr, 1); return 0; } int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; 
llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->remote_busy_flag) { llc->remote_busy_flag = 1; mod_timer(&llc->busy_state_timer.timer, jiffies + llc->busy_state_timer.expire); } return 0; } int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, 
LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit = 1; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } void llc_conn_set_p_flag(struct sock *sk, u8 value) { int state_changed = llc_sk(sk)->p_flag && !value; llc_sk(sk)->p_flag = value; if (state_changed) sk->sk_state_change(sk); } int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if 
(nskb) { struct llc_sap *sap = llc->sap; const u8 *dmac = llc->daddr.mac; if (llc->dev->flags & IFF_LOOPBACK) dmac = llc->dev->dev_addr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_sabme_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_set_p_flag(sk, 1); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_pdu_decode_pf_bit(skb, &f_bit); if (nskb) { struct llc_sap *sap = llc->sap; nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_ua_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 0; return 0; } int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 1; return 0; } int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc_conn_set_p_flag(sk, 1); mod_timer(&llc->pf_cycle_timer.timer, jiffies + llc->pf_cycle_timer.expire); return 0; } /** * llc_conn_ac_send_ack_if_needed - check if ack is needed * @sk: current connection structure * @skb: current event * * Checks number of received PDUs which have not been acknowledged, yet, * If number of them reaches to "npta"(Number of PDUs To Acknowledge) then * sends an RR response as acknowledgement for them. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb) { u8 pf_bit; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &pf_bit); llc->ack_pf |= pf_bit & 1; if (!llc->ack_must_be_send) { llc->first_pdu_Ns = llc->vR; llc->ack_must_be_send = 1; llc->ack_pf = pf_bit & 1; } if (((llc->vR - llc->first_pdu_Ns + 1 + LLC_2_SEQ_NBR_MODULO) % LLC_2_SEQ_NBR_MODULO) >= llc->npta) { llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0; llc->ack_pf = 0; llc_conn_ac_inc_npta_value(sk, skb); } return 0; } /** * llc_conn_ac_rst_sendack_flag - resets ack_must_be_send flag * @sk: current connection structure * @skb: current event * * This action resets ack_must_be_send flag of given connection, this flag * indicates if there is any PDU which has not been acknowledged yet. * Returns 0 for success, 1 otherwise. */ int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0; return 0; } /** * llc_conn_ac_send_i_rsp_f_set_ackpf - acknowledge received PDUs * @sk: current connection structure * @skb: current event * * Sends an I response PDU with f-bit set to ack_pf flag as acknowledge to * all received PDUs which have not been acknowledged, yet. ack_pf flag is * set to one if one PDU with p-bit set to one is received. Returns 0 for * success, 1 otherwise. 
*/ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { skb_get(skb); llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } /** * llc_conn_ac_send_i_as_ack - sends an I-format PDU to acknowledge rx PDUs * @sk: current connection structure. * @skb: current event. * * This action sends an I-format PDU as acknowledge to received PDUs which * have not been acknowledged, yet, if there is any. By using of this * action number of acknowledgements decreases, this technic is called * piggy backing. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); int ret; if (llc->ack_must_be_send) { ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0 ; llc->ack_pf = 0; } else { ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb); } return ret; } /** * llc_conn_ac_send_rr_rsp_f_set_ackpf - ack all rx PDUs not yet acked * @sk: current connection structure. * @skb: current event. * * This action sends an RR response with f-bit set to ack_pf flag as * acknowledge to all received PDUs which have not been acknowledged, yet, * if there is any. ack_pf flag indicates if a PDU has been received with * p-bit set to one. Returns 0 for success, 1 otherwise. */ static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } /** * llc_conn_ac_inc_npta_value - tries to make value of npta greater * @sk: current connection structure. * @skb: current event. * * After "inc_cntr" times calling of this action, "npta" increase by one. * this action tries to make vale of "npta" greater as possible; number of * acknowledgements decreases by increasing of "npta". Returns 0 for * success, 1 otherwise. */ static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->inc_cntr) { llc->dec_step = 0; llc->dec_cntr = llc->inc_cntr = 2; ++llc->npta; if (llc->npta > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->npta = (u8) ~LLC_2_SEQ_NBR_MODULO; } else --llc->inc_cntr; return 0; } /** * llc_conn_ac_adjust_npta_by_rr - decreases "npta" by one * @sk: current connection structure. * @skb: current event. * * After receiving "dec_cntr" times RR command, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. 
*/ int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->connect_step && !llc->remote_busy_flag) { if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) llc->npta = llc->npta - 1; } else llc->dec_cntr -=1; } } else llc->connect_step = 0 ; return 0; } /** * llc_conn_ac_adjust_npta_by_rnr - decreases "npta" by one * @sk: current connection structure. * @skb: current event. * * After receiving "dec_cntr" times RNR command, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) --llc->npta; } else --llc->dec_cntr; } return 0; } /** * llc_conn_ac_dec_tx_win_size - decreases tx window size * @sk: current connection structure. * @skb: current event. * * After receiving of a REJ command or response, transmit window size is * decreased by number of PDUs which are outstanding yet. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); u8 unacked_pdu = skb_queue_len(&llc->pdu_unack_q); if (llc->k - unacked_pdu < 1) llc->k = 1; else llc->k -= unacked_pdu; return 0; } /** * llc_conn_ac_inc_tx_win_size - tx window size is inc by 1 * @sk: current connection structure. * @skb: current event. * * After receiving an RR response with f-bit set to one, transmit window * size is increased by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc->k += 1; if (llc->k > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->k = (u8) ~LLC_2_SEQ_NBR_MODULO; return 0; } int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) { llc_sk_stop_all_timers(sk, false); return 0; } int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); del_timer(&llc->rej_sent_timer.timer); del_timer(&llc->pf_cycle_timer.timer); del_timer(&llc->busy_state_timer.timer); llc->ack_must_be_send = 0; llc->ack_pf = 0; return 0; } int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->rej_sent_timer.timer, jiffies + llc->rej_sent_timer.expire); return 0; } int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!timer_pending(&llc->ack_timer.timer)) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) { del_timer(&llc_sk(sk)->ack_timer.timer); return 0; } int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); del_timer(&llc->pf_cycle_timer.timer); llc_conn_set_p_flag(sk, 0); return 0; } int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) { del_timer(&llc_sk(sk)->rej_sent_timer.timer); return 0; } int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) { int acked; u16 unacked = 0; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = llc_sk(sk); 
llc->last_nr = PDU_SUPV_GET_Nr(pdu); acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked); /* On loopback we don't queue I frames in unack_pdu_q queue. */ if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { llc->retry_count = 0; del_timer(&llc->ack_timer.timer); if (llc->failed_data_req) { /* already, we did not accept data from upper layer * (tx_window full or unacceptable state). Now, we * can send data and must inform to upper layer. */ llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } if (unacked) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); } else if (llc->failed_data_req) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit == 1) { llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } } return 0; } int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu)) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit) { llc_conn_set_p_flag(sk, 0); llc_conn_ac_stop_p_timer(sk, skb); } } return 0; } int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 2; return 0; } int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 0; return 0; } int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) { if (!llc_sk(sk)->data_flag) llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 0); return 0; } static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 1); return 0; } int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->remote_busy_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 1; return 0; } int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count = 0; return 0; } int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count++; return 0; } int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = 0; return 0; } int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR); return 0; } int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = 0; return 0; } int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = llc_sk(sk)->last_nr; return 0; } static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO; return 0; } static void llc_conn_tmr_common_cb(struct sock *sk, u8 type) { struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); bh_lock_sock(sk); if (skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); skb_set_owner_r(skb, sk); ev->type = type; llc_process_tmr_ev(sk, skb); } bh_unlock_sock(sk); } void llc_conn_pf_cycle_tmr_cb(struct timer_list *t) { struct llc_sock *llc = from_timer(llc, t, pf_cycle_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_P_TMR); } void llc_conn_busy_tmr_cb(struct timer_list *t) { struct llc_sock *llc = from_timer(llc, t, busy_state_timer.timer); llc_conn_tmr_common_cb(&llc->sk, 
LLC_CONN_EV_TYPE_BUSY_TMR); } void llc_conn_ack_tmr_cb(struct timer_list *t) { struct llc_sock *llc = from_timer(llc, t, ack_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_ACK_TMR); } void llc_conn_rej_tmr_cb(struct timer_list *t) { struct llc_sock *llc = from_timer(llc, t, rej_sent_timer.timer); llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_REJ_TMR); } int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->X = llc_sk(sk)->vS; llc_conn_ac_set_vs_nr(sk, skb); return 0; } int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = PDU_SUPV_GET_Nr(pdu); if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X)) llc_conn_ac_set_vs_nr(sk, skb); return 0; } /* * Non-standard actions; these not contained in IEEE specification; for * our own usage */ /** * llc_conn_disc - removes connection from SAP list and frees it * @sk: closed connection * @skb: occurred event */ int llc_conn_disc(struct sock *sk, struct sk_buff *skb) { /* FIXME: this thing seems to want to die */ return 0; } /** * llc_conn_reset - resets connection * @sk : reseting connection. * @skb: occurred event. * * Stop all timers, empty all queues and reset all flags. */ int llc_conn_reset(struct sock *sk, struct sk_buff *skb) { llc_sk_reset(sk); return 0; } /** * llc_circular_between - designates that b is between a and c or not * @a: lower bound * @b: element to see if is between a and b * @c: upper bound * * This function designates that b is between a and c or not (for example, * 0 is between 127 and 1). Returns 1 if b is between a and c, 0 * otherwise. */ u8 llc_circular_between(u8 a, u8 b, u8 c) { b = b - a; c = c - a; return b <= c; } /** * llc_process_tmr_ev - timer backend * @sk: active connection * @skb: occurred event * * This function is called from timer callback functions. When connection * is busy (during sending a data frame) timer expiration event must be * queued. Otherwise this event can be sent to connection state machine. * Queued events will process by llc_backlog_rcv function after sending * data frame. */ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) { if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { printk(KERN_WARNING "%s: timer called on closed connection\n", __func__); kfree_skb(skb); } else { if (!sock_owned_by_user(sk)) llc_conn_state_process(sk, skb); else { llc_set_backlog_type(skb, LLC_EVENT); __sk_add_backlog(sk, skb); } } } |
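/*
 * Illustrative sketch, not part of the kernel build: a standalone userspace
 * copy of the wrap-around subtraction used by llc_circular_between() above,
 * with a few assertions showing how it decides whether sequence number b lies
 * in the circular interval [a, c] (e.g. 0 is "between" 127 and 1). The names
 * and test values below are ours; only the two-subtraction comparison mirrors
 * the kernel helper.
 */
#include <assert.h>

typedef unsigned char u8;

static u8 circular_between(u8 a, u8 b, u8 c)
{
	b = b - a;	/* offset of b from a, wrapping on u8 overflow */
	c = c - a;	/* offset of c from a, wrapping on u8 overflow */
	return b <= c;	/* b is inside the window iff its offset is not larger */
}

int main(void)
{
	assert(circular_between(0, 64, 127));	/* plain, non-wrapping window */
	assert(circular_between(127, 0, 1));	/* the "0 between 127 and 1" case */
	assert(circular_between(120, 2, 5));	/* window wraps past the modulus */
	assert(!circular_between(120, 10, 5));	/* b falls outside the window */
	return 0;
}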
// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/ #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/version.h> #include <media/cec-pin.h> #include "cec-priv.h" #include "cec-pin-priv.h" static inline struct cec_devnode *cec_devnode_data(struct file *filp) { struct cec_fh *fh = filp->private_data; return &fh->adap->devnode; } /* CEC file operations */ static __poll_t cec_poll(struct file *filp, struct poll_table_struct *poll) { struct cec_fh *fh = filp->private_data; struct cec_adapter *adap = fh->adap; __poll_t res = 0; poll_wait(filp, &fh->wait, poll); if (!cec_is_registered(adap)) return EPOLLERR | EPOLLHUP | EPOLLPRI; mutex_lock(&adap->lock); if (adap->is_configured && adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ) res |= EPOLLOUT | EPOLLWRNORM; if (fh->queued_msgs) res |= EPOLLIN | EPOLLRDNORM; if (fh->total_queued_events) res |= EPOLLPRI; mutex_unlock(&adap->lock); return res; } static bool cec_is_busy(const struct cec_adapter *adap, const struct cec_fh *fh) { bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh; bool valid_follower = adap->cec_follower && adap->cec_follower == fh; /* * Exclusive initiators and followers can always access the CEC adapter */ if (valid_initiator || valid_follower) return false; /* * All others can only access the CEC adapter if there is no * exclusive initiator and they are in INITIATOR mode. */ return adap->cec_initiator || fh->mode_initiator == CEC_MODE_NO_INITIATOR; } static long cec_adap_g_caps(struct cec_adapter *adap, struct cec_caps __user *parg) { struct cec_caps caps = {}; strscpy(caps.driver, adap->devnode.dev.parent->driver->name, sizeof(caps.driver)); strscpy(caps.name, adap->name, sizeof(caps.name)); caps.available_log_addrs = adap->available_log_addrs; caps.capabilities = adap->capabilities; caps.version = LINUX_VERSION_CODE; if (copy_to_user(parg, &caps, sizeof(caps))) return -EFAULT; return 0; } static long cec_adap_g_phys_addr(struct cec_adapter *adap, __u16 __user *parg) { u16 phys_addr; mutex_lock(&adap->lock); phys_addr = adap->phys_addr; mutex_unlock(&adap->lock); if (copy_to_user(parg, &phys_addr, sizeof(phys_addr))) return -EFAULT; return 0; } static int cec_validate_phys_addr(u16 phys_addr) { int i; if (phys_addr == CEC_PHYS_ADDR_INVALID) return 0; for (i = 0; i < 16; i += 4) if (phys_addr & (0xf << i)) break; if (i == 16) return 0; for (i += 4; i < 16; i += 4) if ((phys_addr & (0xf << i)) == 0) return -EINVAL; return 0; } static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh, bool block, __u16 __user *parg) { u16 phys_addr; long err; if (!(adap->capabilities & CEC_CAP_PHYS_ADDR)) return -ENOTTY; if (copy_from_user(&phys_addr, parg, sizeof(phys_addr))) return -EFAULT; err = cec_validate_phys_addr(phys_addr); if (err) return err; mutex_lock(&adap->lock); if (cec_is_busy(adap, fh)) err = -EBUSY; else __cec_s_phys_addr(adap, phys_addr, block); mutex_unlock(&adap->lock); return err; } static long cec_adap_g_log_addrs(struct cec_adapter *adap, struct cec_log_addrs __user *parg) { struct cec_log_addrs log_addrs; mutex_lock(&adap->lock); /* * We use memcpy here instead of assignment since there is a * hole at the end of struct cec_log_addrs that an assignment * might ignore. So when we do copy_to_user() we could leak * one byte of memory. 
*/ memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs)); if (!adap->is_configured) memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID, sizeof(log_addrs.log_addr)); mutex_unlock(&adap->lock); if (copy_to_user(parg, &log_addrs, sizeof(log_addrs))) return -EFAULT; return 0; } static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_log_addrs __user *parg) { struct cec_log_addrs log_addrs; long err = -EBUSY; if (!(adap->capabilities & CEC_CAP_LOG_ADDRS)) return -ENOTTY; if (copy_from_user(&log_addrs, parg, sizeof(log_addrs))) return -EFAULT; log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK | CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU | CEC_LOG_ADDRS_FL_CDC_ONLY; mutex_lock(&adap->lock); if (!adap->is_configuring && (!log_addrs.num_log_addrs || !adap->is_configured) && !cec_is_busy(adap, fh)) { err = __cec_s_log_addrs(adap, &log_addrs, block); if (!err) log_addrs = adap->log_addrs; } mutex_unlock(&adap->lock); if (err) return err; if (copy_to_user(parg, &log_addrs, sizeof(log_addrs))) return -EFAULT; return 0; } static long cec_adap_g_connector_info(struct cec_adapter *adap, struct cec_log_addrs __user *parg) { int ret = 0; if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO)) return -ENOTTY; mutex_lock(&adap->lock); if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info))) ret = -EFAULT; mutex_unlock(&adap->lock); return ret; } static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_msg __user *parg) { struct cec_msg msg = {}; long err = 0; if (!(adap->capabilities & CEC_CAP_TRANSMIT)) return -ENOTTY; if (copy_from_user(&msg, parg, sizeof(msg))) return -EFAULT; mutex_lock(&adap->lock); if (adap->log_addrs.num_log_addrs == 0) err = -EPERM; else if (adap->is_configuring) err = -ENONET; else if (cec_is_busy(adap, fh)) err = -EBUSY; else err = cec_transmit_msg_fh(adap, &msg, fh, block); mutex_unlock(&adap->lock); if (err) return err; if (copy_to_user(parg, &msg, sizeof(msg))) return -EFAULT; return 0; } /* Called by CEC_RECEIVE: wait for a message to arrive */ static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block) { u32 timeout = msg->timeout; int res; do { mutex_lock(&fh->lock); /* Are there received messages queued up? 
*/ if (fh->queued_msgs) { /* Yes, return the first one */ struct cec_msg_entry *entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); list_del(&entry->list); *msg = entry->msg; kfree(entry); fh->queued_msgs--; mutex_unlock(&fh->lock); /* restore original timeout value */ msg->timeout = timeout; return 0; } /* No, return EAGAIN in non-blocking mode or wait */ mutex_unlock(&fh->lock); /* Return when in non-blocking mode */ if (!block) return -EAGAIN; if (msg->timeout) { /* The user specified a timeout */ res = wait_event_interruptible_timeout(fh->wait, fh->queued_msgs, msecs_to_jiffies(msg->timeout)); if (res == 0) res = -ETIMEDOUT; else if (res > 0) res = 0; } else { /* Wait indefinitely */ res = wait_event_interruptible(fh->wait, fh->queued_msgs); } /* Exit on error, otherwise loop to get the new message */ } while (!res); return res; } static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_msg __user *parg) { struct cec_msg msg = {}; long err; if (copy_from_user(&msg, parg, sizeof(msg))) return -EFAULT; err = cec_receive_msg(fh, &msg, block); if (err) return err; msg.flags = 0; if (copy_to_user(parg, &msg, sizeof(msg))) return -EFAULT; return 0; } static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh, bool block, struct cec_event __user *parg) { struct cec_event_entry *ev = NULL; u64 ts = ~0ULL; unsigned int i; unsigned int ev_idx; long err = 0; mutex_lock(&fh->lock); while (!fh->total_queued_events && block) { mutex_unlock(&fh->lock); err = wait_event_interruptible(fh->wait, fh->total_queued_events); if (err) return err; mutex_lock(&fh->lock); } /* Find the oldest event */ for (i = 0; i < CEC_NUM_EVENTS; i++) { struct cec_event_entry *entry = list_first_entry_or_null(&fh->events[i], struct cec_event_entry, list); if (entry && entry->ev.ts <= ts) { ev = entry; ev_idx = i; ts = ev->ev.ts; } } if (!ev) { err = -EAGAIN; goto unlock; } list_del(&ev->list); if (copy_to_user(parg, &ev->ev, sizeof(ev->ev))) err = -EFAULT; if (ev_idx >= CEC_NUM_CORE_EVENTS) kfree(ev); fh->queued_events[ev_idx]--; fh->total_queued_events--; unlock: mutex_unlock(&fh->lock); return err; } static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh, u32 __user *parg) { u32 mode = fh->mode_initiator | fh->mode_follower; if (copy_to_user(parg, &mode, sizeof(mode))) return -EFAULT; return 0; } static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh, u32 __user *parg) { u32 mode; u8 mode_initiator; u8 mode_follower; bool send_pin_event = false; long err = 0; if (copy_from_user(&mode, parg, sizeof(mode))) return -EFAULT; if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) { dprintk(1, "%s: invalid mode bits set\n", __func__); return -EINVAL; } mode_initiator = mode & CEC_MODE_INITIATOR_MSK; mode_follower = mode & CEC_MODE_FOLLOWER_MSK; if (mode_initiator > CEC_MODE_EXCL_INITIATOR || mode_follower > CEC_MODE_MONITOR_ALL) { dprintk(1, "%s: unknown mode\n", __func__); return -EINVAL; } if (mode_follower == CEC_MODE_MONITOR_ALL && !(adap->capabilities & CEC_CAP_MONITOR_ALL)) { dprintk(1, "%s: MONITOR_ALL not supported\n", __func__); return -EINVAL; } if (mode_follower == CEC_MODE_MONITOR_PIN && !(adap->capabilities & CEC_CAP_MONITOR_PIN)) { dprintk(1, "%s: MONITOR_PIN not supported\n", __func__); return -EINVAL; } /* Follower modes should always be able to send CEC messages */ if ((mode_initiator == CEC_MODE_NO_INITIATOR || !(adap->capabilities & CEC_CAP_TRANSMIT)) && mode_follower >= CEC_MODE_FOLLOWER && mode_follower <= 
CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { dprintk(1, "%s: cannot transmit\n", __func__); return -EINVAL; } /* Monitor modes require CEC_MODE_NO_INITIATOR */ if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) { dprintk(1, "%s: monitor modes require NO_INITIATOR\n", __func__); return -EINVAL; } /* Monitor modes require CAP_NET_ADMIN */ if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN)) return -EPERM; mutex_lock(&adap->lock); /* * You can't become exclusive follower if someone else already * has that job. */ if ((mode_follower == CEC_MODE_EXCL_FOLLOWER || mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) && adap->cec_follower && adap->cec_follower != fh) err = -EBUSY; /* * You can't become exclusive initiator if someone else already * has that job. */ if (mode_initiator == CEC_MODE_EXCL_INITIATOR && adap->cec_initiator && adap->cec_initiator != fh) err = -EBUSY; if (!err) { bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL; bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL; if (old_mon_all != new_mon_all) { if (new_mon_all) err = cec_monitor_all_cnt_inc(adap); else cec_monitor_all_cnt_dec(adap); } } if (!err) { bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN; bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN; if (old_mon_pin != new_mon_pin) { send_pin_event = new_mon_pin; if (new_mon_pin) err = cec_monitor_pin_cnt_inc(adap); else cec_monitor_pin_cnt_dec(adap); } } if (err) { mutex_unlock(&adap->lock); return err; } if (fh->mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt--; if (mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt++; if (send_pin_event) { struct cec_event ev = { .flags = CEC_EVENT_FL_INITIAL_STATE, }; ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH : CEC_EVENT_PIN_CEC_LOW; cec_queue_event_fh(fh, &ev, 0); } if (mode_follower == CEC_MODE_EXCL_FOLLOWER || mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) { adap->passthrough = mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU; adap->cec_follower = fh; } else if (adap->cec_follower == fh) { adap->passthrough = false; adap->cec_follower = NULL; } if (mode_initiator == CEC_MODE_EXCL_INITIATOR) adap->cec_initiator = fh; else if (adap->cec_initiator == fh) adap->cec_initiator = NULL; fh->mode_initiator = mode_initiator; fh->mode_follower = mode_follower; mutex_unlock(&adap->lock); return 0; } static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct cec_fh *fh = filp->private_data; struct cec_adapter *adap = fh->adap; bool block = !(filp->f_flags & O_NONBLOCK); void __user *parg = (void __user *)arg; if (!cec_is_registered(adap)) return -ENODEV; switch (cmd) { case CEC_ADAP_G_CAPS: return cec_adap_g_caps(adap, parg); case CEC_ADAP_G_PHYS_ADDR: return cec_adap_g_phys_addr(adap, parg); case CEC_ADAP_S_PHYS_ADDR: return cec_adap_s_phys_addr(adap, fh, block, parg); case CEC_ADAP_G_LOG_ADDRS: return cec_adap_g_log_addrs(adap, parg); case CEC_ADAP_S_LOG_ADDRS: return cec_adap_s_log_addrs(adap, fh, block, parg); case CEC_ADAP_G_CONNECTOR_INFO: return cec_adap_g_connector_info(adap, parg); case CEC_TRANSMIT: return cec_transmit(adap, fh, block, parg); case CEC_RECEIVE: return cec_receive(adap, fh, block, parg); case CEC_DQEVENT: return cec_dqevent(adap, fh, block, parg); case CEC_G_MODE: return cec_g_mode(adap, fh, parg); case CEC_S_MODE: return cec_s_mode(adap, fh, parg); default: return -ENOTTY; } } static int cec_open(struct inode *inode, struct file *filp) { struct cec_devnode *devnode = container_of(inode->i_cdev, struct cec_devnode, 
cdev); struct cec_adapter *adap = to_cec_adapter(devnode); struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); /* * Initial events that are automatically sent when the cec device is * opened. */ struct cec_event ev = { .event = CEC_EVENT_STATE_CHANGE, .flags = CEC_EVENT_FL_INITIAL_STATE, }; unsigned int i; int err; if (!fh) return -ENOMEM; INIT_LIST_HEAD(&fh->msgs); INIT_LIST_HEAD(&fh->xfer_list); for (i = 0; i < CEC_NUM_EVENTS; i++) INIT_LIST_HEAD(&fh->events[i]); mutex_init(&fh->lock); init_waitqueue_head(&fh->wait); fh->mode_initiator = CEC_MODE_INITIATOR; fh->adap = adap; err = cec_get_device(devnode); if (err) { kfree(fh); return err; } filp->private_data = fh; /* Queue up initial state events */ ev.state_change.phys_addr = adap->phys_addr; ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; ev.state_change.have_conn_info = adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR; cec_queue_event_fh(fh, &ev, 0); #ifdef CONFIG_CEC_PIN if (adap->pin && adap->pin->ops->read_hpd && !adap->devnode.unregistered) { err = adap->pin->ops->read_hpd(adap); if (err >= 0) { ev.event = err ? CEC_EVENT_PIN_HPD_HIGH : CEC_EVENT_PIN_HPD_LOW; cec_queue_event_fh(fh, &ev, 0); } } if (adap->pin && adap->pin->ops->read_5v && !adap->devnode.unregistered) { err = adap->pin->ops->read_5v(adap); if (err >= 0) { ev.event = err ? CEC_EVENT_PIN_5V_HIGH : CEC_EVENT_PIN_5V_LOW; cec_queue_event_fh(fh, &ev, 0); } } #endif mutex_lock(&devnode->lock); mutex_lock(&devnode->lock_fhs); list_add(&fh->list, &devnode->fhs); mutex_unlock(&devnode->lock_fhs); mutex_unlock(&devnode->lock); return 0; } /* Override for the release function */ static int cec_release(struct inode *inode, struct file *filp) { struct cec_devnode *devnode = cec_devnode_data(filp); struct cec_adapter *adap = to_cec_adapter(devnode); struct cec_fh *fh = filp->private_data; unsigned int i; mutex_lock(&adap->lock); if (adap->cec_initiator == fh) adap->cec_initiator = NULL; if (adap->cec_follower == fh) { adap->cec_follower = NULL; adap->passthrough = false; } if (fh->mode_follower == CEC_MODE_FOLLOWER) adap->follower_cnt--; if (fh->mode_follower == CEC_MODE_MONITOR_PIN) cec_monitor_pin_cnt_dec(adap); if (fh->mode_follower == CEC_MODE_MONITOR_ALL) cec_monitor_all_cnt_dec(adap); mutex_unlock(&adap->lock); mutex_lock(&devnode->lock); mutex_lock(&devnode->lock_fhs); list_del(&fh->list); mutex_unlock(&devnode->lock_fhs); mutex_unlock(&devnode->lock); /* Unhook pending transmits from this filehandle. */ mutex_lock(&adap->lock); while (!list_empty(&fh->xfer_list)) { struct cec_data *data = list_first_entry(&fh->xfer_list, struct cec_data, xfer_list); data->blocking = false; data->fh = NULL; list_del_init(&data->xfer_list); } mutex_unlock(&adap->lock); while (!list_empty(&fh->msgs)) { struct cec_msg_entry *entry = list_first_entry(&fh->msgs, struct cec_msg_entry, list); list_del(&entry->list); kfree(entry); } for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) { while (!list_empty(&fh->events[i])) { struct cec_event_entry *entry = list_first_entry(&fh->events[i], struct cec_event_entry, list); list_del(&entry->list); kfree(entry); } } kfree(fh); cec_put_device(devnode); filp->private_data = NULL; return 0; } const struct file_operations cec_devnode_fops = { .owner = THIS_MODULE, .open = cec_open, .unlocked_ioctl = cec_ioctl, .compat_ioctl = cec_ioctl, .release = cec_release, .poll = cec_poll, .llseek = no_llseek, }; |
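/*
 * Minimal userspace sketch of driving the ioctls dispatched by cec_ioctl()
 * above. Assumptions: a CEC device node (here /dev/cec0, the path is a guess)
 * exists and the UAPI header <linux/cec.h> is installed. It queries the
 * adapter capabilities with CEC_ADAP_G_CAPS and reads the current physical
 * address with CEC_ADAP_G_PHYS_ADDR; error handling is kept to a minimum.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps = {};
	__u16 pa = CEC_PHYS_ADDR_INVALID;
	int fd = open("/dev/cec0", O_RDWR);	/* device path is an assumption */

	if (fd < 0)
		return 1;

	if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) == 0)
		printf("driver %s, adapter %s, caps 0x%08x\n",
		       caps.driver, caps.name, caps.capabilities);

	if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa) == 0)
		printf("physical address %x.%x.%x.%x\n",
		       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);

	close(fd);
	return 0;
}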
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AT and PS/2 keyboard driver
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

/*
 * This driver can handle standard AT keyboards and PS/2 keyboards in
 * Translated and Raw Set 2 and Set 3, as well as AT keyboards on dumb
 * input-only controllers and AT keyboards connected over a one-way RS232
 * converter.
*/ #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/input.h> #include <linux/input/vivaldi-fmap.h> #include <linux/serio.h> #include <linux/workqueue.h> #include <linux/libps2.h> #include <linux/mutex.h> #include <linux/dmi.h> #include <linux/property.h> #define DRIVER_DESC "AT and PS/2 keyboard driver" MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static int atkbd_set = 2; module_param_named(set, atkbd_set, int, 0); MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)"); #if defined(__i386__) || defined(__x86_64__) || defined(__hppa__) static bool atkbd_reset; #else static bool atkbd_reset = true; #endif module_param_named(reset, atkbd_reset, bool, 0); MODULE_PARM_DESC(reset, "Reset keyboard during initialization"); static bool atkbd_softrepeat; module_param_named(softrepeat, atkbd_softrepeat, bool, 0); MODULE_PARM_DESC(softrepeat, "Use software keyboard repeat"); static bool atkbd_softraw = true; module_param_named(softraw, atkbd_softraw, bool, 0); MODULE_PARM_DESC(softraw, "Use software generated rawmode"); static bool atkbd_scroll; module_param_named(scroll, atkbd_scroll, bool, 0); MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards"); static bool atkbd_extra; module_param_named(extra, atkbd_extra, bool, 0); MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); static bool atkbd_terminal; module_param_named(terminal, atkbd_terminal, bool, 0); MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2"); #define SCANCODE(keymap) ((keymap >> 16) & 0xFFFF) #define KEYCODE(keymap) (keymap & 0xFFFF) /* * Scancode to keycode tables. These are just the default setting, and * are loadable via a userland utility. 
*/ #define ATKBD_KEYMAP_SIZE 512 static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = { #ifdef CONFIG_KEYBOARD_ATKBD_HP_KEYCODES /* XXX: need a more general approach */ #include "hpps2atkbd.h" /* include the keyboard scancodes */ #else 0, 67, 65, 63, 61, 59, 60, 88, 0, 68, 66, 64, 62, 15, 41,117, 0, 56, 42, 93, 29, 16, 2, 0, 0, 0, 44, 31, 30, 17, 3, 0, 0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183, 0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185, 0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0, 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85, 0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0, 82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 217,100,255, 0, 97,165, 0, 0,156, 0, 0, 0, 0, 0, 0,125, 173,114, 0,113, 0, 0, 0,126,128, 0, 0,140, 0, 0, 0,127, 159, 0,115, 0,164, 0, 0,116,158, 0,172,166, 0, 0, 0,142, 157, 0, 0, 0, 0, 0, 0, 0,155, 0, 98, 0, 0,163, 0, 0, 226, 0, 0, 0, 0, 0, 0, 0, 0,255, 96, 0, 0, 0,143, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,107, 0,105,102, 0, 0,112, 110,111,108,112,106,103, 0,119, 0,118,109, 0, 99,104,119, 0, 0, 0, 0, 65, 99, #endif }; static const unsigned short atkbd_set3_keycode[ATKBD_KEYMAP_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 59, 1,138,128,129,130, 15, 41, 60, 131, 29, 42, 86, 58, 16, 2, 61,133, 56, 44, 31, 30, 17, 3, 62, 134, 46, 45, 32, 18, 5, 4, 63,135, 57, 47, 33, 20, 19, 6, 64, 136, 49, 48, 35, 34, 21, 7, 65,137,100, 50, 36, 22, 8, 9, 66, 125, 51, 37, 23, 24, 11, 10, 67,126, 52, 53, 38, 39, 25, 12, 68, 113,114, 40, 43, 26, 13, 87, 99, 97, 54, 28, 27, 43, 43, 88, 70, 108,105,119,103,111,107, 14,110, 0, 79,106, 75, 71,109,102,104, 82, 83, 80, 76, 77, 72, 69, 98, 0, 96, 81, 0, 78, 73, 55,183, 184,185,186,187, 74, 94, 92, 93, 0, 0, 0,125,126,127,112, 0, 0,139,172,163,165,115,152,172,166,140,160,154,113,114,167,168, 148,149,147,140 }; static const unsigned short atkbd_unxlate_table[128] = { 0,118, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85,102, 13, 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 20, 28, 27, 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 93, 26, 34, 33, 42, 50, 49, 58, 65, 73, 74, 89,124, 17, 41, 88, 5, 6, 4, 12, 3, 11, 2, 10, 1, 9,119,126,108,117,125,123,107,115,116,121,105, 114,122,112,113,127, 96, 97,120, 7, 15, 23, 31, 39, 47, 55, 63, 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87,111, 19, 25, 57, 81, 83, 92, 95, 98, 99,100,101,103,104,106,109,110 }; #define ATKBD_CMD_SETLEDS 0x10ed #define ATKBD_CMD_GSCANSET 0x11f0 #define ATKBD_CMD_SSCANSET 0x10f0 #define ATKBD_CMD_GETID 0x02f2 #define ATKBD_CMD_SETREP 0x10f3 #define ATKBD_CMD_ENABLE 0x00f4 #define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ #define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ #define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */ #define ATKBD_CMD_SETALL_MBR 0x00fa /* ... 
and repeat */ #define ATKBD_CMD_RESET_BAT 0x02ff #define ATKBD_CMD_RESEND 0x00fe #define ATKBD_CMD_EX_ENABLE 0x10ea #define ATKBD_CMD_EX_SETLEDS 0x20eb #define ATKBD_CMD_OK_GETID 0x02e8 #define ATKBD_RET_ACK 0xfa #define ATKBD_RET_NAK 0xfe #define ATKBD_RET_BAT 0xaa #define ATKBD_RET_EMUL0 0xe0 #define ATKBD_RET_EMUL1 0xe1 #define ATKBD_RET_RELEASE 0xf0 #define ATKBD_RET_HANJA 0xf1 #define ATKBD_RET_HANGEUL 0xf2 #define ATKBD_RET_ERR 0xff #define ATKBD_KEY_UNKNOWN 0 #define ATKBD_KEY_NULL 255 #define ATKBD_SCR_1 0xfffe #define ATKBD_SCR_2 0xfffd #define ATKBD_SCR_4 0xfffc #define ATKBD_SCR_8 0xfffb #define ATKBD_SCR_CLICK 0xfffa #define ATKBD_SCR_LEFT 0xfff9 #define ATKBD_SCR_RIGHT 0xfff8 #define ATKBD_SPECIAL ATKBD_SCR_RIGHT #define ATKBD_LED_EVENT_BIT 0 #define ATKBD_REP_EVENT_BIT 1 #define ATKBD_XL_ERR 0x01 #define ATKBD_XL_BAT 0x02 #define ATKBD_XL_ACK 0x04 #define ATKBD_XL_NAK 0x08 #define ATKBD_XL_HANGEUL 0x10 #define ATKBD_XL_HANJA 0x20 static const struct { unsigned short keycode; unsigned char set2; } atkbd_scroll_keys[] = { { ATKBD_SCR_1, 0xc5 }, { ATKBD_SCR_2, 0x9d }, { ATKBD_SCR_4, 0xa4 }, { ATKBD_SCR_8, 0x9b }, { ATKBD_SCR_CLICK, 0xe0 }, { ATKBD_SCR_LEFT, 0xcb }, { ATKBD_SCR_RIGHT, 0xd2 }, }; /* * The atkbd control structure */ struct atkbd { struct ps2dev ps2dev; struct input_dev *dev; /* Written only during init */ char name[64]; char phys[32]; unsigned short id; unsigned short keycode[ATKBD_KEYMAP_SIZE]; DECLARE_BITMAP(force_release_mask, ATKBD_KEYMAP_SIZE); unsigned char set; bool translated; bool extra; bool write; bool softrepeat; bool softraw; bool scroll; bool enabled; /* Accessed only from interrupt */ unsigned char emul; bool resend; bool release; unsigned long xl_bit; unsigned int last; unsigned long time; unsigned long err_count; struct delayed_work event_work; unsigned long event_jiffies; unsigned long event_mask; /* Serializes reconnect(), attr->set() and event work */ struct mutex mutex; struct vivaldi_data vdata; }; /* * System-specific keymap fixup routine */ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); static void *atkbd_platform_fixup_data; static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); /* * Certain keyboards to not like ATKBD_CMD_RESET_DIS and stop responding * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed. 
*/ static bool atkbd_skip_deactivate; static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)); static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, ssize_t (*handler)(struct atkbd *, const char *, size_t)); #define ATKBD_DEFINE_ATTR(_name) \ static ssize_t atkbd_show_##_name(struct atkbd *, char *); \ static ssize_t atkbd_set_##_name(struct atkbd *, const char *, size_t); \ static ssize_t atkbd_do_show_##_name(struct device *d, \ struct device_attribute *attr, char *b) \ { \ return atkbd_attr_show_helper(d, b, atkbd_show_##_name); \ } \ static ssize_t atkbd_do_set_##_name(struct device *d, \ struct device_attribute *attr, const char *b, size_t s) \ { \ return atkbd_attr_set_helper(d, b, s, atkbd_set_##_name); \ } \ static struct device_attribute atkbd_attr_##_name = \ __ATTR(_name, S_IWUSR | S_IRUGO, atkbd_do_show_##_name, atkbd_do_set_##_name); ATKBD_DEFINE_ATTR(extra); ATKBD_DEFINE_ATTR(force_release); ATKBD_DEFINE_ATTR(scroll); ATKBD_DEFINE_ATTR(set); ATKBD_DEFINE_ATTR(softrepeat); ATKBD_DEFINE_ATTR(softraw); #define ATKBD_DEFINE_RO_ATTR(_name) \ static ssize_t atkbd_show_##_name(struct atkbd *, char *); \ static ssize_t atkbd_do_show_##_name(struct device *d, \ struct device_attribute *attr, char *b) \ { \ return atkbd_attr_show_helper(d, b, atkbd_show_##_name); \ } \ static struct device_attribute atkbd_attr_##_name = \ __ATTR(_name, S_IRUGO, atkbd_do_show_##_name, NULL); ATKBD_DEFINE_RO_ATTR(err_count); ATKBD_DEFINE_RO_ATTR(function_row_physmap); static struct attribute *atkbd_attributes[] = { &atkbd_attr_extra.attr, &atkbd_attr_force_release.attr, &atkbd_attr_scroll.attr, &atkbd_attr_set.attr, &atkbd_attr_softrepeat.attr, &atkbd_attr_softraw.attr, &atkbd_attr_err_count.attr, &atkbd_attr_function_row_physmap.attr, NULL }; static ssize_t atkbd_show_function_row_physmap(struct atkbd *atkbd, char *buf) { return vivaldi_function_row_physmap_show(&atkbd->vdata, buf); } static struct atkbd *atkbd_from_serio(struct serio *serio) { struct ps2dev *ps2dev = serio_get_drvdata(serio); return container_of(ps2dev, struct atkbd, ps2dev); } static umode_t atkbd_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *dev = kobj_to_dev(kobj); struct serio *serio = to_serio_port(dev); struct atkbd *atkbd = atkbd_from_serio(serio); if (attr == &atkbd_attr_function_row_physmap.attr && !atkbd->vdata.num_function_row_keys) return 0; return attr->mode; } static const struct attribute_group atkbd_attribute_group = { .attrs = atkbd_attributes, .is_visible = atkbd_attr_is_visible, }; __ATTRIBUTE_GROUPS(atkbd_attribute); static const unsigned int xl_table[] = { ATKBD_RET_BAT, ATKBD_RET_ERR, ATKBD_RET_ACK, ATKBD_RET_NAK, ATKBD_RET_HANJA, ATKBD_RET_HANGEUL, }; /* * Checks if we should mangle the scancode to extract 'release' bit * in translated mode. */ static bool atkbd_need_xlate(unsigned long xl_bit, unsigned char code) { int i; if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1) return false; for (i = 0; i < ARRAY_SIZE(xl_table); i++) if (code == xl_table[i]) return test_bit(i, &xl_bit); return true; } /* * Calculates new value of xl_bit so the driver can distinguish * between make/break pair of scancodes for select keys and PS/2 * protocol responses. 
*/ static void atkbd_calculate_xl_bit(struct atkbd *atkbd, unsigned char code) { int i; for (i = 0; i < ARRAY_SIZE(xl_table); i++) { if (!((code ^ xl_table[i]) & 0x7f)) { if (code & 0x80) __clear_bit(i, &atkbd->xl_bit); else __set_bit(i, &atkbd->xl_bit); break; } } } /* * Encode the scancode, 0xe0 prefix, and high bit into a single integer, * keeping kernel 2.4 compatibility for set 2 */ static unsigned int atkbd_compat_scancode(struct atkbd *atkbd, unsigned int code) { if (atkbd->set == 3) { if (atkbd->emul == 1) code |= 0x100; } else { code = (code & 0x7f) | ((code & 0x80) << 1); if (atkbd->emul == 1) code |= 0x80; } return code; } /* * Tries to handle frame or parity error by requesting the keyboard controller * to resend the last byte. This historically not done on x86 as controllers * there typically do not implement this command. */ static bool __maybe_unused atkbd_handle_frame_error(struct ps2dev *ps2dev, u8 data, unsigned int flags) { struct atkbd *atkbd = container_of(ps2dev, struct atkbd, ps2dev); struct serio *serio = ps2dev->serio; if ((flags & (SERIO_FRAME | SERIO_PARITY)) && (~flags & SERIO_TIMEOUT) && !atkbd->resend && atkbd->write) { dev_warn(&serio->dev, "Frame/parity error: %02x\n", flags); serio_write(serio, ATKBD_CMD_RESEND); atkbd->resend = true; return true; } if (!flags && data == ATKBD_RET_ACK) atkbd->resend = false; return false; } static enum ps2_disposition atkbd_pre_receive_byte(struct ps2dev *ps2dev, u8 data, unsigned int flags) { struct serio *serio = ps2dev->serio; dev_dbg(&serio->dev, "Received %02x flags %02x\n", data, flags); #if !defined(__i386__) && !defined (__x86_64__) if (atkbd_handle_frame_error(ps2dev, data, flags)) return PS2_IGNORE; #endif return PS2_PROCESS; } static void atkbd_receive_byte(struct ps2dev *ps2dev, u8 data) { struct serio *serio = ps2dev->serio; struct atkbd *atkbd = container_of(ps2dev, struct atkbd, ps2dev); struct input_dev *dev = atkbd->dev; unsigned int code = data; int scroll = 0, hscroll = 0, click = -1; int value; unsigned short keycode; pm_wakeup_event(&serio->dev, 0); if (!atkbd->enabled) return; input_event(dev, EV_MSC, MSC_RAW, code); if (atkbd_platform_scancode_fixup) code = atkbd_platform_scancode_fixup(atkbd, code); if (atkbd->translated) { if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) { atkbd->release = code >> 7; code &= 0x7f; } if (!atkbd->emul) atkbd_calculate_xl_bit(atkbd, data); } switch (code) { case ATKBD_RET_BAT: atkbd->enabled = false; serio_reconnect(atkbd->ps2dev.serio); return; case ATKBD_RET_EMUL0: atkbd->emul = 1; return; case ATKBD_RET_EMUL1: atkbd->emul = 2; return; case ATKBD_RET_RELEASE: atkbd->release = true; return; case ATKBD_RET_ACK: case ATKBD_RET_NAK: if (printk_ratelimit()) dev_warn(&serio->dev, "Spurious %s on %s. " "Some program might be trying to access hardware directly.\n", data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys); return; case ATKBD_RET_ERR: atkbd->err_count++; dev_dbg(&serio->dev, "Keyboard on %s reports too many keys pressed.\n", serio->phys); return; } code = atkbd_compat_scancode(atkbd, code); if (atkbd->emul && --atkbd->emul) return; keycode = atkbd->keycode[code]; if (!(atkbd->release && test_bit(code, atkbd->force_release_mask))) if (keycode != ATKBD_KEY_NULL) input_event(dev, EV_MSC, MSC_SCAN, code); switch (keycode) { case ATKBD_KEY_NULL: break; case ATKBD_KEY_UNKNOWN: dev_warn(&serio->dev, "Unknown key %s (%s set %d, code %#x on %s).\n", atkbd->release ? "released" : "pressed", atkbd->translated ? 
"translated" : "raw", atkbd->set, code, serio->phys); dev_warn(&serio->dev, "Use 'setkeycodes %s%02x <keycode>' to make it known.\n", code & 0x80 ? "e0" : "", code & 0x7f); input_sync(dev); break; case ATKBD_SCR_1: scroll = 1; break; case ATKBD_SCR_2: scroll = 2; break; case ATKBD_SCR_4: scroll = 4; break; case ATKBD_SCR_8: scroll = 8; break; case ATKBD_SCR_CLICK: click = !atkbd->release; break; case ATKBD_SCR_LEFT: hscroll = -1; break; case ATKBD_SCR_RIGHT: hscroll = 1; break; default: if (atkbd->release) { value = 0; atkbd->last = 0; } else if (!atkbd->softrepeat && test_bit(keycode, dev->key)) { /* Workaround Toshiba laptop multiple keypress */ value = time_before(jiffies, atkbd->time) && atkbd->last == code ? 1 : 2; } else { value = 1; atkbd->last = code; atkbd->time = jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]) / 2; } input_event(dev, EV_KEY, keycode, value); input_sync(dev); if (value && test_bit(code, atkbd->force_release_mask)) { input_event(dev, EV_MSC, MSC_SCAN, code); input_report_key(dev, keycode, 0); input_sync(dev); } } if (atkbd->scroll) { if (click != -1) input_report_key(dev, BTN_MIDDLE, click); input_report_rel(dev, REL_WHEEL, atkbd->release ? -scroll : scroll); input_report_rel(dev, REL_HWHEEL, hscroll); input_sync(dev); } atkbd->release = false; } static int atkbd_set_repeat_rate(struct atkbd *atkbd) { const short period[32] = { 33, 37, 42, 46, 50, 54, 58, 63, 67, 75, 83, 92, 100, 109, 116, 125, 133, 149, 167, 182, 200, 217, 232, 250, 270, 303, 333, 370, 400, 435, 470, 500 }; const short delay[4] = { 250, 500, 750, 1000 }; struct input_dev *dev = atkbd->dev; unsigned char param; int i = 0, j = 0; while (i < ARRAY_SIZE(period) - 1 && period[i] < dev->rep[REP_PERIOD]) i++; dev->rep[REP_PERIOD] = period[i]; while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY]) j++; dev->rep[REP_DELAY] = delay[j]; param = i | (j << 5); return ps2_command(&atkbd->ps2dev, ¶m, ATKBD_CMD_SETREP); } static int atkbd_set_leds(struct atkbd *atkbd) { struct input_dev *dev = atkbd->dev; unsigned char param[2]; param[0] = (test_bit(LED_SCROLLL, dev->led) ? 1 : 0) | (test_bit(LED_NUML, dev->led) ? 2 : 0) | (test_bit(LED_CAPSL, dev->led) ? 4 : 0); if (ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_SETLEDS)) return -1; if (atkbd->extra) { param[0] = 0; param[1] = (test_bit(LED_COMPOSE, dev->led) ? 0x01 : 0) | (test_bit(LED_SLEEP, dev->led) ? 0x02 : 0) | (test_bit(LED_SUSPEND, dev->led) ? 0x04 : 0) | (test_bit(LED_MISC, dev->led) ? 0x10 : 0) | (test_bit(LED_MUTE, dev->led) ? 0x20 : 0); if (ps2_command(&atkbd->ps2dev, param, ATKBD_CMD_EX_SETLEDS)) return -1; } return 0; } /* * atkbd_event_work() is used to complete processing of events that * can not be processed by input_event() which is often called from * interrupt context. */ static void atkbd_event_work(struct work_struct *work) { struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work); mutex_lock(&atkbd->mutex); if (!atkbd->enabled) { /* * Serio ports are resumed asynchronously so while driver core * thinks that device is already fully operational in reality * it may not be ready yet. In this case we need to keep * rescheduling till reconnect completes. */ schedule_delayed_work(&atkbd->event_work, msecs_to_jiffies(100)); } else { if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) atkbd_set_leds(atkbd); if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) atkbd_set_repeat_rate(atkbd); } mutex_unlock(&atkbd->mutex); } /* * Schedule switch for execution. 
We need to throttle requests, * otherwise keyboard may become unresponsive. */ static void atkbd_schedule_event_work(struct atkbd *atkbd, int event_bit) { unsigned long delay = msecs_to_jiffies(50); if (time_after(jiffies, atkbd->event_jiffies + delay)) delay = 0; atkbd->event_jiffies = jiffies; set_bit(event_bit, &atkbd->event_mask); mb(); schedule_delayed_work(&atkbd->event_work, delay); } /* * Event callback from the input module. Events that change the state of * the hardware are processed here. If action can not be performed in * interrupt context it is offloaded to atkbd_event_work. */ static int atkbd_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { struct atkbd *atkbd = input_get_drvdata(dev); if (!atkbd->write) return -1; switch (type) { case EV_LED: atkbd_schedule_event_work(atkbd, ATKBD_LED_EVENT_BIT); return 0; case EV_REP: if (!atkbd->softrepeat) atkbd_schedule_event_work(atkbd, ATKBD_REP_EVENT_BIT); return 0; default: return -1; } } /* * atkbd_enable() signals that interrupt handler is allowed to * generate input events. */ static inline void atkbd_enable(struct atkbd *atkbd) { serio_pause_rx(atkbd->ps2dev.serio); atkbd->enabled = true; serio_continue_rx(atkbd->ps2dev.serio); } /* * atkbd_disable() tells input handler that all incoming data except * for ACKs and command response should be dropped. */ static inline void atkbd_disable(struct atkbd *atkbd) { serio_pause_rx(atkbd->ps2dev.serio); atkbd->enabled = false; serio_continue_rx(atkbd->ps2dev.serio); } static int atkbd_activate(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; /* * Enable the keyboard to receive keystrokes. */ if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) { dev_err(&ps2dev->serio->dev, "Failed to enable keyboard on %s\n", ps2dev->serio->phys); return -1; } return 0; } /* * atkbd_deactivate() resets and disables the keyboard from sending * keystrokes. */ static void atkbd_deactivate(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_DIS)) dev_err(&ps2dev->serio->dev, "Failed to deactivate keyboard on %s\n", ps2dev->serio->phys); } #ifdef CONFIG_X86 static bool atkbd_is_portable_device(void) { static const char * const chassis_types[] = { "8", /* Portable */ "9", /* Laptop */ "10", /* Notebook */ "14", /* Sub-Notebook */ "31", /* Convertible */ "32", /* Detachable */ }; int i; for (i = 0; i < ARRAY_SIZE(chassis_types); i++) if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i])) return true; return false; } /* * On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops * the controller is always in translated mode. In this mode mice/touchpads will * not work. So in this case simply assume a keyboard is connected to avoid * confusing some laptop keyboards. * * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id * and in translated mode that is a no-op. */ static bool atkbd_skip_getid(struct atkbd *atkbd) { return atkbd->translated && atkbd_is_portable_device(); } #else static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; } #endif /* * atkbd_probe() probes for an AT keyboard on a serio port. */ static int atkbd_probe(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; unsigned char param[2]; /* * Some systems, where the bit-twiddling when testing the io-lines of the * controller may confuse the keyboard need a full reset of the keyboard. 
On * these systems the BIOS also usually doesn't do it for us. */ if (atkbd_reset) if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_BAT)) dev_warn(&ps2dev->serio->dev, "keyboard reset failed on %s\n", ps2dev->serio->phys); if (atkbd_skip_getid(atkbd)) { atkbd->id = 0xab83; goto deactivate_kbd; } /* * Then we check the keyboard ID. We should get 0xab83 under normal conditions. * Some keyboards report different values, but the first byte is always 0xab or * 0xac. Some old AT keyboards don't report anything. If a mouse is connected, this * should make sure we don't try to set the LEDs on it. */ param[0] = param[1] = 0xa5; /* initialize with invalid values */ if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { /* * If the get ID command failed, we check if we can at least set * the LEDs on the keyboard. This should work on every keyboard out there. * It also turns the LEDs off, which we want anyway. */ param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS)) return -1; atkbd->id = 0xabba; return 0; } if (!ps2_is_keyboard_id(param[0])) return -1; atkbd->id = (param[0] << 8) | param[1]; if (atkbd->id == 0xaca1 && atkbd->translated) { dev_err(&ps2dev->serio->dev, "NCD terminal keyboards are only supported on non-translating controllers. " "Use i8042.direct=1 to disable translation.\n"); return -1; } deactivate_kbd: /* * Make sure nothing is coming from the keyboard and disturbs our * internal state. */ if (!atkbd_skip_deactivate) atkbd_deactivate(atkbd); return 0; } /* * atkbd_select_set checks if a keyboard has a working Set 3 support, and * sets it into that. Unfortunately there are keyboards that can be switched * to Set 3, but don't work well in that (BTC Multimedia ...) */ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra) { struct ps2dev *ps2dev = &atkbd->ps2dev; unsigned char param[2]; atkbd->extra = false; /* * For known special keyboards we can go ahead and set the correct set. * We check for NCD PS/2 Sun, NorthGate OmniKey 101 and * IBM RapidAccess / IBM EzButton / Chicony KBP-8993 keyboards. */ if (atkbd->translated) return 2; if (atkbd->id == 0xaca1) { param[0] = 3; ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET); return 3; } if (allow_extra) { param[0] = 0x71; if (!ps2_command(ps2dev, param, ATKBD_CMD_EX_ENABLE)) { atkbd->extra = true; return 2; } } if (atkbd_terminal) { ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB); return 3; } if (target_set != 3) return 2; if (!ps2_command(ps2dev, param, ATKBD_CMD_OK_GETID)) { atkbd->id = param[0] << 8 | param[1]; return 2; } param[0] = 3; if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET)) return 2; param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_GSCANSET)) return 2; if (param[0] != 3) { param[0] = 2; if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET)) return 2; } ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR); return 3; } static int atkbd_reset_state(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; unsigned char param[1]; /* * Set the LEDs to a predefined state (all off). */ param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS)) return -1; /* * Set autorepeat to fastest possible. */ param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP)) return -1; return 0; } /* * atkbd_cleanup() restores the keyboard state so that BIOS is happy after a * reboot. */ static void atkbd_cleanup(struct serio *serio) { struct atkbd *atkbd = atkbd_from_serio(serio); atkbd_disable(atkbd); ps2_command(&atkbd->ps2dev, NULL, ATKBD_CMD_RESET_DEF); } /* * atkbd_disconnect() closes and frees. 
*/ static void atkbd_disconnect(struct serio *serio) { struct atkbd *atkbd = atkbd_from_serio(serio); atkbd_disable(atkbd); input_unregister_device(atkbd->dev); /* * Make sure we don't have a command in flight. * Note that since atkbd->enabled is false event work will keep * rescheduling itself until it gets canceled and will not try * accessing freed input device or serio port. */ cancel_delayed_work_sync(&atkbd->event_work); serio_close(serio); serio_set_drvdata(serio, NULL); kfree(atkbd); } /* * generate release events for the keycodes given in data */ static void atkbd_apply_forced_release_keylist(struct atkbd* atkbd, const void *data) { const unsigned int *keys = data; unsigned int i; if (atkbd->set == 2) for (i = 0; keys[i] != -1U; i++) __set_bit(keys[i], atkbd->force_release_mask); } /* * Most special keys (Fn+F?) on Dell laptops do not generate release * events so we have to do it ourselves. */ static unsigned int atkbd_dell_laptop_forced_release_keys[] = { 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, -1U }; /* * Perform fixup for HP system that doesn't generate release * for its video switch */ static unsigned int atkbd_hp_forced_release_keys[] = { 0x94, -1U }; /* * Samsung NC10,NC20 with Fn+F? key release not working */ static unsigned int atkbd_samsung_forced_release_keys[] = { 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U }; /* * Amilo Pi 3525 key release for Fn+Volume keys not working */ static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = { 0x20, 0xa0, 0x2e, 0xae, 0x30, 0xb0, -1U }; /* * Amilo Xi 3650 key release for light touch bar not working */ static unsigned int atkbd_amilo_xi3650_forced_release_keys[] = { 0x67, 0xed, 0x90, 0xa2, 0x99, 0xa4, 0xae, 0xb0, -1U }; /* * Soltech TA12 system with broken key release on volume keys and mute key */ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = { 0xa0, 0xae, 0xb0, -1U }; /* * Many notebooks don't send key release event for volume up/down * keys, with key list below common among them */ static unsigned int atkbd_volume_forced_release_keys[] = { 0xae, 0xb0, -1U }; /* * OQO 01+ multimedia keys (64--66) generate e0 6x upon release whereas * they should be generating e4-e6 (0x80 | code). 
*/ static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd, unsigned int code) { if (atkbd->translated && atkbd->emul == 1 && (code == 0x64 || code == 0x65 || code == 0x66)) { atkbd->emul = 0; code |= 0x80; } return code; } static int atkbd_get_keymap_from_fwnode(struct atkbd *atkbd) { struct device *dev = &atkbd->ps2dev.serio->dev; int i, n; u32 *ptr; u16 scancode, keycode; /* Parse "linux,keymap" property */ n = device_property_count_u32(dev, "linux,keymap"); if (n <= 0 || n > ATKBD_KEYMAP_SIZE) return -ENXIO; ptr = kcalloc(n, sizeof(u32), GFP_KERNEL); if (!ptr) return -ENOMEM; if (device_property_read_u32_array(dev, "linux,keymap", ptr, n)) { dev_err(dev, "problem parsing FW keymap property\n"); kfree(ptr); return -EINVAL; } memset(atkbd->keycode, 0, sizeof(atkbd->keycode)); for (i = 0; i < n; i++) { scancode = SCANCODE(ptr[i]); keycode = KEYCODE(ptr[i]); atkbd->keycode[scancode] = keycode; } kfree(ptr); return 0; } /* * atkbd_set_keycode_table() initializes keyboard's keycode table * according to the selected scancode set */ static void atkbd_set_keycode_table(struct atkbd *atkbd) { struct device *dev = &atkbd->ps2dev.serio->dev; unsigned int scancode; int i, j; memset(atkbd->keycode, 0, sizeof(atkbd->keycode)); bitmap_zero(atkbd->force_release_mask, ATKBD_KEYMAP_SIZE); if (!atkbd_get_keymap_from_fwnode(atkbd)) { dev_dbg(dev, "Using FW keymap\n"); } else if (atkbd->translated) { for (i = 0; i < 128; i++) { scancode = atkbd_unxlate_table[i]; atkbd->keycode[i] = atkbd_set2_keycode[scancode]; atkbd->keycode[i | 0x80] = atkbd_set2_keycode[scancode | 0x80]; if (atkbd->scroll) for (j = 0; j < ARRAY_SIZE(atkbd_scroll_keys); j++) if ((scancode | 0x80) == atkbd_scroll_keys[j].set2) atkbd->keycode[i | 0x80] = atkbd_scroll_keys[j].keycode; } } else if (atkbd->set == 3) { memcpy(atkbd->keycode, atkbd_set3_keycode, sizeof(atkbd->keycode)); } else { memcpy(atkbd->keycode, atkbd_set2_keycode, sizeof(atkbd->keycode)); if (atkbd->scroll) for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++) { scancode = atkbd_scroll_keys[i].set2; atkbd->keycode[scancode] = atkbd_scroll_keys[i].keycode; } } /* * HANGEUL and HANJA keys do not send release events so we need to * generate such events ourselves */ scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL); atkbd->keycode[scancode] = KEY_HANGEUL; __set_bit(scancode, atkbd->force_release_mask); scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA); atkbd->keycode[scancode] = KEY_HANJA; __set_bit(scancode, atkbd->force_release_mask); /* * Perform additional fixups */ if (atkbd_platform_fixup) atkbd_platform_fixup(atkbd, atkbd_platform_fixup_data); } /* * atkbd_set_device_attrs() sets up keyboard's input device structure */ static void atkbd_set_device_attrs(struct atkbd *atkbd) { struct input_dev *input_dev = atkbd->dev; int i; if (atkbd->extra) snprintf(atkbd->name, sizeof(atkbd->name), "AT Set 2 Extra keyboard"); else snprintf(atkbd->name, sizeof(atkbd->name), "AT %s Set %d keyboard", atkbd->translated ? "Translated" : "Raw", atkbd->set); snprintf(atkbd->phys, sizeof(atkbd->phys), "%s/input0", atkbd->ps2dev.serio->phys); input_dev->name = atkbd->name; input_dev->phys = atkbd->phys; input_dev->id.bustype = BUS_I8042; input_dev->id.vendor = 0x0001; input_dev->id.product = atkbd->translated ? 
1 : atkbd->set; input_dev->id.version = atkbd->id; input_dev->event = atkbd_event; input_dev->dev.parent = &atkbd->ps2dev.serio->dev; input_set_drvdata(input_dev, atkbd); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) | BIT_MASK(EV_MSC); if (atkbd->write) { input_dev->evbit[0] |= BIT_MASK(EV_LED); input_dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | BIT_MASK(LED_SCROLLL); } if (atkbd->extra) input_dev->ledbit[0] |= BIT_MASK(LED_COMPOSE) | BIT_MASK(LED_SUSPEND) | BIT_MASK(LED_SLEEP) | BIT_MASK(LED_MUTE) | BIT_MASK(LED_MISC); if (!atkbd->softrepeat) { input_dev->rep[REP_DELAY] = 250; input_dev->rep[REP_PERIOD] = 33; } input_dev->mscbit[0] = atkbd->softraw ? BIT_MASK(MSC_SCAN) : BIT_MASK(MSC_RAW) | BIT_MASK(MSC_SCAN); if (atkbd->scroll) { input_dev->evbit[0] |= BIT_MASK(EV_REL); input_dev->relbit[0] = BIT_MASK(REL_WHEEL) | BIT_MASK(REL_HWHEEL); __set_bit(BTN_MIDDLE, input_dev->keybit); } input_dev->keycode = atkbd->keycode; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(atkbd_set2_keycode); for (i = 0; i < ATKBD_KEYMAP_SIZE; i++) { if (atkbd->keycode[i] != KEY_RESERVED && atkbd->keycode[i] != ATKBD_KEY_NULL && atkbd->keycode[i] < ATKBD_SPECIAL) { __set_bit(atkbd->keycode[i], input_dev->keybit); } } } static void atkbd_parse_fwnode_data(struct serio *serio) { struct atkbd *atkbd = atkbd_from_serio(serio); struct device *dev = &serio->dev; int n; /* Parse "function-row-physmap" property */ n = device_property_count_u32(dev, "function-row-physmap"); if (n > 0 && n <= VIVALDI_MAX_FUNCTION_ROW_KEYS && !device_property_read_u32_array(dev, "function-row-physmap", atkbd->vdata.function_row_physmap, n)) { atkbd->vdata.num_function_row_keys = n; dev_dbg(dev, "FW reported %d function-row key locations\n", n); } } /* * atkbd_connect() is called when the serio module finds an interface * that isn't handled yet by an appropriate device driver. We check if * there is an AT keyboard out there and if yes, we register ourselves * to the input module. */ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) { struct atkbd *atkbd; struct input_dev *dev; int err = -ENOMEM; atkbd = kzalloc(sizeof(struct atkbd), GFP_KERNEL); dev = input_allocate_device(); if (!atkbd || !dev) goto fail1; atkbd->dev = dev; ps2_init(&atkbd->ps2dev, serio, atkbd_pre_receive_byte, atkbd_receive_byte); INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work); mutex_init(&atkbd->mutex); switch (serio->id.type) { case SERIO_8042_XL: atkbd->translated = true; fallthrough; case SERIO_8042: if (serio->write) atkbd->write = true; break; } atkbd->softraw = atkbd_softraw; atkbd->softrepeat = atkbd_softrepeat; atkbd->scroll = atkbd_scroll; if (atkbd->softrepeat) atkbd->softraw = true; serio_set_drvdata(serio, atkbd); err = serio_open(serio, drv); if (err) goto fail2; if (atkbd->write) { if (atkbd_probe(atkbd)) { err = -ENODEV; goto fail3; } atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra); atkbd_reset_state(atkbd); } else { atkbd->set = 2; atkbd->id = 0xab00; } atkbd_parse_fwnode_data(serio); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); atkbd_enable(atkbd); if (serio->write) atkbd_activate(atkbd); err = input_register_device(atkbd->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(dev); kfree(atkbd); return err; } /* * atkbd_reconnect() tries to restore keyboard into a sane state and is * most likely called on resume. 
*/ static int atkbd_reconnect(struct serio *serio) { struct atkbd *atkbd = atkbd_from_serio(serio); struct serio_driver *drv = serio->drv; int retval = -1; if (!atkbd || !drv) { dev_dbg(&serio->dev, "reconnect request, but serio is disconnected, ignoring...\n"); return -1; } mutex_lock(&atkbd->mutex); atkbd_disable(atkbd); if (atkbd->write) { if (atkbd_probe(atkbd)) goto out; if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra)) goto out; /* * Restore LED state and repeat rate. While input core * will do this for us at resume time reconnect may happen * because user requested it via sysfs or simply because * keyboard was unplugged and plugged in again so we need * to do it ourselves here. */ atkbd_set_leds(atkbd); if (!atkbd->softrepeat) atkbd_set_repeat_rate(atkbd); } /* * Reset our state machine in case reconnect happened in the middle * of multi-byte scancode. */ atkbd->xl_bit = 0; atkbd->emul = 0; atkbd_enable(atkbd); if (atkbd->write) atkbd_activate(atkbd); retval = 0; out: mutex_unlock(&atkbd->mutex); return retval; } static const struct serio_device_id atkbd_serio_ids[] = { { .type = SERIO_8042, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { .type = SERIO_8042_XL, .proto = SERIO_ANY, .id = SERIO_ANY, .extra = SERIO_ANY, }, { .type = SERIO_RS232, .proto = SERIO_PS2SER, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, atkbd_serio_ids); static struct serio_driver atkbd_drv = { .driver = { .name = "atkbd", .dev_groups = atkbd_attribute_groups, }, .description = DRIVER_DESC, .id_table = atkbd_serio_ids, .interrupt = ps2_interrupt, .connect = atkbd_connect, .reconnect = atkbd_reconnect, .disconnect = atkbd_disconnect, .cleanup = atkbd_cleanup, }; static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)) { struct serio *serio = to_serio_port(dev); struct atkbd *atkbd = atkbd_from_serio(serio); return handler(atkbd, buf); } static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, ssize_t (*handler)(struct atkbd *, const char *, size_t)) { struct serio *serio = to_serio_port(dev); struct atkbd *atkbd = atkbd_from_serio(serio); int retval; retval = mutex_lock_interruptible(&atkbd->mutex); if (retval) return retval; atkbd_disable(atkbd); retval = handler(atkbd, buf, count); atkbd_enable(atkbd); mutex_unlock(&atkbd->mutex); return retval; } static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%d\n", atkbd->extra ? 1 : 0); } static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t count) { struct input_dev *old_dev, *new_dev; unsigned int value; int err; bool old_extra; unsigned char old_set; if (!atkbd->write) return -EIO; err = kstrtouint(buf, 10, &value); if (err) return err; if (value > 1) return -EINVAL; if (atkbd->extra != value) { /* * Since device's properties will change we need to * unregister old device. But allocate and register * new one first to make sure we have it. 
*/ old_dev = atkbd->dev; old_extra = atkbd->extra; old_set = atkbd->set; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; atkbd->dev = new_dev; atkbd->set = atkbd_select_set(atkbd, atkbd->set, value); atkbd_reset_state(atkbd); atkbd_activate(atkbd); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); err = input_register_device(atkbd->dev); if (err) { input_free_device(new_dev); atkbd->dev = old_dev; atkbd->set = atkbd_select_set(atkbd, old_set, old_extra); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); return err; } input_unregister_device(old_dev); } return count; } static ssize_t atkbd_show_force_release(struct atkbd *atkbd, char *buf) { size_t len = scnprintf(buf, PAGE_SIZE - 1, "%*pbl", ATKBD_KEYMAP_SIZE, atkbd->force_release_mask); buf[len++] = '\n'; buf[len] = '\0'; return len; } static ssize_t atkbd_set_force_release(struct atkbd *atkbd, const char *buf, size_t count) { /* 64 bytes on stack should be acceptable */ DECLARE_BITMAP(new_mask, ATKBD_KEYMAP_SIZE); int err; err = bitmap_parselist(buf, new_mask, ATKBD_KEYMAP_SIZE); if (err) return err; memcpy(atkbd->force_release_mask, new_mask, sizeof(atkbd->force_release_mask)); return count; } static ssize_t atkbd_show_scroll(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%d\n", atkbd->scroll ? 1 : 0); } static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t count) { struct input_dev *old_dev, *new_dev; unsigned int value; int err; bool old_scroll; err = kstrtouint(buf, 10, &value); if (err) return err; if (value > 1) return -EINVAL; if (atkbd->scroll != value) { old_dev = atkbd->dev; old_scroll = atkbd->scroll; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; atkbd->dev = new_dev; atkbd->scroll = value; atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); err = input_register_device(atkbd->dev); if (err) { input_free_device(new_dev); atkbd->scroll = old_scroll; atkbd->dev = old_dev; atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); return err; } input_unregister_device(old_dev); } return count; } static ssize_t atkbd_show_set(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%d\n", atkbd->set); } static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count) { struct input_dev *old_dev, *new_dev; unsigned int value; int err; unsigned char old_set; bool old_extra; if (!atkbd->write) return -EIO; err = kstrtouint(buf, 10, &value); if (err) return err; if (value != 2 && value != 3) return -EINVAL; if (atkbd->set != value) { old_dev = atkbd->dev; old_extra = atkbd->extra; old_set = atkbd->set; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; atkbd->dev = new_dev; atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra); atkbd_reset_state(atkbd); atkbd_activate(atkbd); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); err = input_register_device(atkbd->dev); if (err) { input_free_device(new_dev); atkbd->dev = old_dev; atkbd->set = atkbd_select_set(atkbd, old_set, old_extra); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); return err; } input_unregister_device(old_dev); } return count; } static ssize_t atkbd_show_softrepeat(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%d\n", atkbd->softrepeat ? 
1 : 0); } static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t count) { struct input_dev *old_dev, *new_dev; unsigned int value; int err; bool old_softrepeat, old_softraw; if (!atkbd->write) return -EIO; err = kstrtouint(buf, 10, &value); if (err) return err; if (value > 1) return -EINVAL; if (atkbd->softrepeat != value) { old_dev = atkbd->dev; old_softrepeat = atkbd->softrepeat; old_softraw = atkbd->softraw; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; atkbd->dev = new_dev; atkbd->softrepeat = value; if (atkbd->softrepeat) atkbd->softraw = true; atkbd_set_device_attrs(atkbd); err = input_register_device(atkbd->dev); if (err) { input_free_device(new_dev); atkbd->dev = old_dev; atkbd->softrepeat = old_softrepeat; atkbd->softraw = old_softraw; atkbd_set_device_attrs(atkbd); return err; } input_unregister_device(old_dev); } return count; } static ssize_t atkbd_show_softraw(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%d\n", atkbd->softraw ? 1 : 0); } static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t count) { struct input_dev *old_dev, *new_dev; unsigned int value; int err; bool old_softraw; err = kstrtouint(buf, 10, &value); if (err) return err; if (value > 1) return -EINVAL; if (atkbd->softraw != value) { old_dev = atkbd->dev; old_softraw = atkbd->softraw; new_dev = input_allocate_device(); if (!new_dev) return -ENOMEM; atkbd->dev = new_dev; atkbd->softraw = value; atkbd_set_device_attrs(atkbd); err = input_register_device(atkbd->dev); if (err) { input_free_device(new_dev); atkbd->dev = old_dev; atkbd->softraw = old_softraw; atkbd_set_device_attrs(atkbd); return err; } input_unregister_device(old_dev); } return count; } static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf) { return sprintf(buf, "%lu\n", atkbd->err_count); } static int __init atkbd_setup_forced_release(const struct dmi_system_id *id) { atkbd_platform_fixup = atkbd_apply_forced_release_keylist; atkbd_platform_fixup_data = id->driver_data; return 1; } static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id) { atkbd_platform_scancode_fixup = id->driver_data; return 1; } static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id) { atkbd_skip_deactivate = true; return 1; } /* * NOTE: do not add any more "force release" quirks to this table. The * task of adjusting list of keys that should be "released" automatically * by the driver is now delegated to userspace tools, such as udev, so * submit such quirks there. 
*/ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_dell_laptop_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_dell_laptop_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_hp_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { /* Inventec Symphony */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { /* Samsung NC10 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_samsung_forced_release_keys, }, { /* Samsung NC20 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "NC20"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_samsung_forced_release_keys, }, { /* Samsung SQ45S70S */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_samsung_forced_release_keys, }, { /* Fujitsu Amilo PA 1510 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_volume_forced_release_keys, }, { /* Fujitsu Amilo Pi 3525 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pi 3525"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_amilo_pi3525_forced_release_keys, }, { /* Fujitsu Amilo Xi 3650 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Xi 3650"), }, .callback = atkbd_setup_forced_release, .driver_data = atkbd_amilo_xi3650_forced_release_keys, }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Soltech Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "TA12"), }, .callback = atkbd_setup_forced_release, .driver_data = atkdb_soltech_ta12_forced_release_keys, }, { /* OQO Model 01+ */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "OQO"), DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), }, .callback = atkbd_setup_scancode_fixup, .driver_data = atkbd_oqo_01plus_scancode_fixup, }, { .matches = { 
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
		},
		.callback = atkbd_deactivate_fixup,
	},
	{ }
};

static int __init atkbd_init(void)
{
	dmi_check_system(atkbd_dmi_quirk_table);
	return serio_register_driver(&atkbd_drv);
}

static void __exit atkbd_exit(void)
{
	serio_unregister_driver(&atkbd_drv);
}

module_init(atkbd_init);
module_exit(atkbd_exit);
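/*
 * Editor's note -- illustrative sketch only, not part of atkbd.c.
 *
 * The stand-alone program below mirrors the set-2 branch of
 * atkbd_compat_scancode() above, purely to make the packing easier to
 * follow: the high bit of the raw byte is moved up to bit 8 and an
 * active 0xe0 prefix (atkbd->emul == 1) becomes bit 7, so the result
 * always fits the 512-entry keycode[] table (ATKBD_KEYMAP_SIZE).
 * It builds with any hosted C compiler and touches no kernel interfaces;
 * the helper name compat_scancode_set2() is invented for this sketch.
 */
#include <stdio.h>

static unsigned int compat_scancode_set2(unsigned int code, int emul)
{
	/* Same arithmetic as the non-set-3 branch of atkbd_compat_scancode(). */
	code = (code & 0x7f) | ((code & 0x80) << 1);
	if (emul == 1)
		code |= 0x80;
	return code;
}

int main(void)
{
	/* Plain code, code with the high bit set, and an e0-prefixed code. */
	printf("0x1c          -> %#x\n", compat_scancode_set2(0x1c, 0)); /* 0x1c  */
	printf("0x9c          -> %#x\n", compat_scancode_set2(0x9c, 0)); /* 0x11c */
	printf("0x1c (e0 pfx) -> %#x\n", compat_scancode_set2(0x1c, 1)); /* 0x9c  */
	return 0;
}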
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.
All rights reserved. */ #ifndef __INCORE_DOT_H__ #define __INCORE_DOT_H__ #include <linux/fs.h> #include <linux/kobject.h> #include <linux/workqueue.h> #include <linux/dlm.h> #include <linux/buffer_head.h> #include <linux/rcupdate.h> #include <linux/rculist_bl.h> #include <linux/completion.h> #include <linux/rbtree.h> #include <linux/ktime.h> #include <linux/percpu.h> #include <linux/lockref.h> #include <linux/rhashtable.h> #include <linux/mutex.h> #define DIO_WAIT 0x00000010 #define DIO_METADATA 0x00000020 struct gfs2_log_operations; struct gfs2_bufdata; struct gfs2_holder; struct gfs2_glock; struct gfs2_quota_data; struct gfs2_trans; struct gfs2_jdesc; struct gfs2_sbd; struct lm_lockops; typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret); struct gfs2_log_header_host { u64 lh_sequence; /* Sequence number of this transaction */ u32 lh_flags; /* GFS2_LOG_HEAD_... */ u32 lh_tail; /* Block number of log tail */ u32 lh_blkno; s64 lh_local_total; s64 lh_local_free; s64 lh_local_dinodes; }; /* * Structure of operations that are associated with each * type of element in the log. */ struct gfs2_log_operations { void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr); void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr); void (*lo_before_scan) (struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass); int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass); void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass); const char *lo_name; }; #define GBF_FULL 1 /** * Clone bitmaps (bi_clone): * * - When a block is freed, we remember the previous state of the block in the * clone bitmap, and only mark the block as free in the real bitmap. * * - When looking for a block to allocate, we check for a free block in the * clone bitmap, and if no clone bitmap exists, in the real bitmap. * * - For allocating a block, we mark it as allocated in the real bitmap, and if * a clone bitmap exists, also in the clone bitmap. * * - At the end of a log_flush, we copy the real bitmap into the clone bitmap * to make the clone bitmap reflect the current allocation state. * (Alternatively, we could remove the clone bitmap.) * * The clone bitmaps are in-core only, and is never written to disk. * * These steps ensure that blocks which have been freed in a transaction cannot * be reallocated in that same transaction. 
*/ struct gfs2_bitmap { struct buffer_head *bi_bh; char *bi_clone; unsigned long bi_flags; u32 bi_offset; u32 bi_start; u32 bi_bytes; u32 bi_blocks; }; struct gfs2_rgrpd { struct rb_node rd_node; /* Link with superblock */ struct gfs2_glock *rd_gl; /* Glock for this rgrp */ u64 rd_addr; /* grp block disk address */ u64 rd_data0; /* first data location */ u32 rd_length; /* length of rgrp header in fs blocks */ u32 rd_data; /* num of data blocks in rgrp */ u32 rd_bitbytes; /* number of bytes in data bitmaps */ u32 rd_free; u32 rd_requested; /* number of blocks in rd_rstree */ u32 rd_reserved; /* number of reserved blocks */ u32 rd_free_clone; u32 rd_dinodes; u64 rd_igeneration; struct gfs2_bitmap *rd_bits; struct gfs2_sbd *rd_sbd; struct gfs2_rgrp_lvb *rd_rgl; u32 rd_last_alloc; u32 rd_flags; u32 rd_extfail_pt; /* extent failure point */ #define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */ #define GFS2_RDF_ERROR 0x40000000 /* error in rg */ #define GFS2_RDF_PREFERRED 0x80000000 /* This rgrp is preferred */ #define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */ spinlock_t rd_rsspin; /* protects reservation related vars */ struct mutex rd_mutex; struct rb_root rd_rstree; /* multi-block reservation tree */ }; enum gfs2_state_bits { BH_Pinned = BH_PrivateStart, BH_Escaped = BH_PrivateStart + 1, }; BUFFER_FNS(Pinned, pinned) TAS_BUFFER_FNS(Pinned, pinned) BUFFER_FNS(Escaped, escaped) TAS_BUFFER_FNS(Escaped, escaped) struct gfs2_bufdata { struct buffer_head *bd_bh; struct gfs2_glock *bd_gl; u64 bd_blkno; struct list_head bd_list; struct gfs2_trans *bd_tr; struct list_head bd_ail_st_list; struct list_head bd_ail_gl_list; }; /* * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a * prefix of lock_dlm_ gets awkward. */ #define GDLM_STRNAME_BYTES 25 #define GDLM_LVB_SIZE 32 /* * ls_recover_flags: * * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been * held by failed nodes whose journals need recovery. Those locks should * only be used for journal recovery until the journal recovery is done. * This is set by the dlm recover_prep callback and cleared by the * gfs2_control thread when journal recovery is complete. To avoid * races between recover_prep setting and gfs2_control clearing, recover_spin * is held while changing this bit and reading/writing recover_block * and recover_start. * * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used. * * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing * recovery of all journals before allowing other nodes to mount the fs. * This is cleared when FIRST_MOUNT_DONE is set. * * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished * recovery of all journals, and now allows other nodes to mount the fs. * * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared * BLOCK_LOCKS for the first time. The gfs2_control thread should now * control clearing BLOCK_LOCKS for further recoveries. * * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq. * * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep() * and recover_done(), i.e. set while recover_block == recover_start. */ enum { DFL_BLOCK_LOCKS = 0, DFL_NO_DLM_OPS = 1, DFL_FIRST_MOUNT = 2, DFL_FIRST_MOUNT_DONE = 3, DFL_MOUNT_DONE = 4, DFL_UNMOUNT = 5, DFL_DLM_RECOVERY = 6, }; /* * We are using struct lm_lockname as an rhashtable key. Avoid holes within * the struct; padding at the end is fine. 
*/ struct lm_lockname { u64 ln_number; struct gfs2_sbd *ln_sbd; unsigned int ln_type; }; #define lm_name_equal(name1, name2) \ (((name1)->ln_number == (name2)->ln_number) && \ ((name1)->ln_type == (name2)->ln_type) && \ ((name1)->ln_sbd == (name2)->ln_sbd)) struct gfs2_glock_operations { int (*go_sync) (struct gfs2_glock *gl); int (*go_xmote_bh)(struct gfs2_glock *gl); void (*go_inval) (struct gfs2_glock *gl, int flags); int (*go_demote_ok) (const struct gfs2_glock *gl); int (*go_instantiate) (struct gfs2_glock *gl); int (*go_held)(struct gfs2_holder *gh); void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl, const char *fs_id_buf); void (*go_callback)(struct gfs2_glock *gl, bool remote); void (*go_free)(struct gfs2_glock *gl); const int go_subclass; const int go_type; const unsigned long go_flags; #define GLOF_ASPACE 1 /* address space attached */ #define GLOF_LVB 2 /* Lock Value Block attached */ #define GLOF_LRU 4 /* LRU managed */ #define GLOF_NONDISK 8 /* not I/O related */ }; enum { GFS2_LKS_SRTT = 0, /* Non blocking smoothed round trip time */ GFS2_LKS_SRTTVAR = 1, /* Non blocking smoothed variance */ GFS2_LKS_SRTTB = 2, /* Blocking smoothed round trip time */ GFS2_LKS_SRTTVARB = 3, /* Blocking smoothed variance */ GFS2_LKS_SIRT = 4, /* Smoothed Inter-request time */ GFS2_LKS_SIRTVAR = 5, /* Smoothed Inter-request variance */ GFS2_LKS_DCOUNT = 6, /* Count of dlm requests */ GFS2_LKS_QCOUNT = 7, /* Count of gfs2_holder queues */ GFS2_NR_LKSTATS }; struct gfs2_lkstats { u64 stats[GFS2_NR_LKSTATS]; }; enum { /* States */ HIF_HOLDER = 6, /* Set for gh that "holds" the glock */ HIF_WAIT = 10, }; struct gfs2_holder { struct list_head gh_list; struct gfs2_glock *gh_gl; struct pid *gh_owner_pid; u16 gh_flags; u16 gh_state; int gh_error; unsigned long gh_iflags; /* HIF_... */ unsigned long gh_ip; }; /* Number of quota types we support */ #define GFS2_MAXQUOTAS 2 struct gfs2_qadata { /* quota allocation data */ /* Quota stuff */ struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS]; struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS]; unsigned int qa_qd_num; int qa_ref; }; /* Resource group multi-block reservation, in order of appearance: Step 1. Function prepares to write, allocates a mb, sets the size hint. Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use Step 4. Bits are assigned from the rgrp based on either the reservation or wherever it can. */ struct gfs2_blkreserv { struct rb_node rs_node; /* node within rd_rstree */ struct gfs2_rgrpd *rs_rgd; u64 rs_start; u32 rs_requested; u32 rs_reserved; /* number of reserved blocks */ }; /* * Allocation parameters * @target: The number of blocks we'd ideally like to allocate * @aflags: The flags (e.g. Orlov flag) * * The intent is to gradually expand this structure over time in * order to give more information, e.g. alignment, min extent size * to the allocation code. 
*/ struct gfs2_alloc_parms { u64 target; u32 min_target; u32 aflags; u64 allowed; }; enum { GLF_LOCK = 1, GLF_INSTANTIATE_NEEDED = 2, /* needs instantiate */ GLF_DEMOTE = 3, GLF_PENDING_DEMOTE = 4, GLF_DEMOTE_IN_PROGRESS = 5, GLF_DIRTY = 6, GLF_LFLUSH = 7, GLF_INVALIDATE_IN_PROGRESS = 8, GLF_REPLY_PENDING = 9, GLF_INITIAL = 10, GLF_FROZEN = 11, GLF_INSTANTIATE_IN_PROG = 12, /* instantiate happening now */ GLF_LRU = 13, GLF_OBJECT = 14, /* Used only for tracing */ GLF_BLOCKING = 15, GLF_FREEING = 16, /* Wait for glock to be freed */ GLF_TRY_TO_EVICT = 17, /* iopen glocks only */ GLF_VERIFY_EVICT = 18, /* iopen glocks only */ }; struct gfs2_glock { unsigned long gl_flags; /* GLF_... */ struct lm_lockname gl_name; struct lockref gl_lockref; /* State fields protected by gl_lockref.lock */ unsigned int gl_state:2, /* Current state */ gl_target:2, /* Target state */ gl_demote_state:2, /* State requested by remote node */ gl_req:2, /* State in last dlm request */ gl_reply:8; /* Last reply from the dlm */ unsigned long gl_demote_time; /* time of first demote request */ long gl_hold_time; struct list_head gl_holders; const struct gfs2_glock_operations *gl_ops; ktime_t gl_dstamp; struct gfs2_lkstats gl_stats; struct dlm_lksb gl_lksb; unsigned long gl_tchange; void *gl_object; struct list_head gl_lru; struct list_head gl_ail_list; atomic_t gl_ail_count; atomic_t gl_revokes; struct delayed_work gl_work; /* For iopen glocks only */ struct { struct delayed_work gl_delete; u64 gl_no_formal_ino; }; struct rcu_head gl_rcu; struct rhash_head gl_node; }; enum { GIF_QD_LOCKED = 1, GIF_ALLOC_FAILED = 2, GIF_SW_PAGED = 3, GIF_FREE_VFS_INODE = 5, GIF_GLOP_PENDING = 6, GIF_DEFERRED_DELETE = 7, }; struct gfs2_inode { struct inode i_inode; u64 i_no_addr; u64 i_no_formal_ino; u64 i_generation; u64 i_eattr; unsigned long i_flags; /* GIF_... */ struct gfs2_glock *i_gl; struct gfs2_holder i_iopen_gh; struct gfs2_qadata *i_qadata; /* quota allocation data */ struct gfs2_holder i_rgd_gh; struct gfs2_blkreserv i_res; /* rgrp multi-block reservation */ u64 i_goal; /* goal block for allocations */ atomic_t i_sizehint; /* hint of the write size */ struct rw_semaphore i_rw_mutex; struct list_head i_ordered; __be64 *i_hash_cache; u32 i_entries; u32 i_diskflags; u8 i_height; u8 i_depth; u16 i_rahead; }; /* * Since i_inode is the first element of struct gfs2_inode, * this is effectively a cast. */ static inline struct gfs2_inode *GFS2_I(struct inode *inode) { return container_of(inode, struct gfs2_inode, i_inode); } static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode) { return inode->i_sb->s_fs_info; } struct gfs2_file { struct mutex f_fl_mutex; struct gfs2_holder f_fl_gh; }; struct gfs2_revoke_replay { struct list_head rr_list; u64 rr_blkno; unsigned int rr_where; }; enum { QDF_CHANGE = 1, QDF_LOCKED = 2, QDF_REFRESH = 3, QDF_QMSG_QUIET = 4, }; struct gfs2_quota_data { struct hlist_bl_node qd_hlist; struct list_head qd_list; struct kqid qd_id; struct gfs2_sbd *qd_sbd; struct lockref qd_lockref; struct list_head qd_lru; unsigned qd_hash; unsigned long qd_flags; /* QDF_... 
*/ s64 qd_change; s64 qd_change_sync; unsigned int qd_slot; unsigned int qd_slot_ref; struct buffer_head *qd_bh; struct gfs2_quota_change *qd_bh_qc; unsigned int qd_bh_count; struct gfs2_glock *qd_gl; struct gfs2_quota_lvb qd_qb; u64 qd_sync_gen; unsigned long qd_last_warn; struct rcu_head qd_rcu; }; enum { TR_TOUCHED = 1, TR_ATTACHED = 2, TR_ONSTACK = 3, }; struct gfs2_trans { unsigned long tr_ip; unsigned int tr_blocks; unsigned int tr_revokes; unsigned int tr_reserved; unsigned long tr_flags; unsigned int tr_num_buf_new; unsigned int tr_num_databuf_new; unsigned int tr_num_buf_rm; unsigned int tr_num_databuf_rm; unsigned int tr_num_revoke; struct list_head tr_list; struct list_head tr_databuf; struct list_head tr_buf; unsigned int tr_first; struct list_head tr_ail1_list; struct list_head tr_ail2_list; }; struct gfs2_journal_extent { struct list_head list; unsigned int lblock; /* First logical block */ u64 dblock; /* First disk block */ u64 blocks; }; struct gfs2_jdesc { struct list_head jd_list; struct list_head extent_list; unsigned int nr_extents; struct work_struct jd_work; struct inode *jd_inode; struct bio *jd_log_bio; unsigned long jd_flags; #define JDF_RECOVERY 1 unsigned int jd_jid; u32 jd_blocks; int jd_recover_error; /* Replay stuff */ unsigned int jd_found_blocks; unsigned int jd_found_revokes; unsigned int jd_replayed_blocks; struct list_head jd_revoke_list; unsigned int jd_replay_tail; u64 jd_no_addr; }; struct gfs2_statfs_change_host { s64 sc_total; s64 sc_free; s64 sc_dinodes; }; #define GFS2_QUOTA_DEFAULT GFS2_QUOTA_OFF #define GFS2_QUOTA_OFF 0 #define GFS2_QUOTA_ACCOUNT 1 #define GFS2_QUOTA_ON 2 #define GFS2_QUOTA_QUIET 3 /* on but not complaining */ #define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED #define GFS2_DATA_WRITEBACK 1 #define GFS2_DATA_ORDERED 2 #define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW #define GFS2_ERRORS_WITHDRAW 0 #define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */ #define GFS2_ERRORS_RO 2 /* place holder for future feature */ #define GFS2_ERRORS_PANIC 3 struct gfs2_args { char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */ char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */ char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */ unsigned int ar_spectator:1; /* Don't get a journal */ unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */ unsigned int ar_debug:1; /* Oops on errors */ unsigned int ar_posix_acl:1; /* Enable posix acls */ unsigned int ar_quota:2; /* off/account/on */ unsigned int ar_suiddir:1; /* suiddir support */ unsigned int ar_data:2; /* ordered/writeback */ unsigned int ar_meta:1; /* mount metafs */ unsigned int ar_discard:1; /* discard requests */ unsigned int ar_errors:2; /* errors=withdraw | panic */ unsigned int ar_nobarrier:1; /* do not send barriers */ unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */ unsigned int ar_got_rgrplvb:1; /* Was the rgrplvb opt given? 
*/ unsigned int ar_loccookie:1; /* use location based readdir cookies */ s32 ar_commit; /* Commit interval */ s32 ar_statfs_quantum; /* The fast statfs interval */ s32 ar_quota_quantum; /* The quota interval */ s32 ar_statfs_percent; /* The % change to force sync */ }; struct gfs2_tune { spinlock_t gt_spin; unsigned int gt_logd_secs; unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */ unsigned int gt_quota_scale_num; /* Numerator */ unsigned int gt_quota_scale_den; /* Denominator */ unsigned int gt_quota_quantum; /* Secs between syncs to quota file */ unsigned int gt_new_files_jdata; unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */ unsigned int gt_complain_secs; unsigned int gt_statfs_quantum; unsigned int gt_statfs_slow; }; enum { SDF_JOURNAL_CHECKED = 0, SDF_JOURNAL_LIVE = 1, SDF_WITHDRAWN = 2, SDF_NOBARRIERS = 3, SDF_NORECOVERY = 4, SDF_DEMOTE = 5, SDF_NOJOURNALID = 6, SDF_RORECOVERY = 7, /* read only recovery */ SDF_SKIP_DLM_UNLOCK = 8, SDF_FORCE_AIL_FLUSH = 9, SDF_FREEZE_INITIATOR = 10, SDF_WITHDRAWING = 11, /* Will withdraw eventually */ SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */ SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */ SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are withdrawing */ SDF_KILL = 15, SDF_EVICTING = 16, SDF_FROZEN = 17, }; #define GFS2_FSNAME_LEN 256 struct gfs2_inum_host { u64 no_formal_ino; u64 no_addr; }; struct gfs2_sb_host { u32 sb_magic; u32 sb_type; u32 sb_fs_format; u32 sb_multihost_format; u32 sb_bsize; u32 sb_bsize_shift; struct gfs2_inum_host sb_master_dir; struct gfs2_inum_host sb_root_dir; char sb_lockproto[GFS2_LOCKNAME_LEN]; char sb_locktable[GFS2_LOCKNAME_LEN]; }; /* * lm_mount() return values * * ls_jid - the journal ID this node should use * ls_first - this node is the first to mount the file system * ls_lockspace - lock module's context for this file system * ls_ops - lock module's functions */ struct lm_lockstruct { int ls_jid; unsigned int ls_first; const struct lm_lockops *ls_ops; dlm_lockspace_t *ls_dlm; int ls_recover_jid_done; /* These two are deprecated, */ int ls_recover_jid_status; /* used previously by gfs_controld */ struct dlm_lksb ls_mounted_lksb; /* mounted_lock */ struct dlm_lksb ls_control_lksb; /* control_lock */ char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ char *ls_lvb_bits; spinlock_t ls_recover_spin; /* protects following fields */ unsigned long ls_recover_flags; /* DFL_ */ uint32_t ls_recover_mount; /* gen in first recover_done cb */ uint32_t ls_recover_start; /* gen in last recover_done cb */ uint32_t ls_recover_block; /* copy recover_start in last recover_prep */ uint32_t ls_recover_size; /* size of recover_submit, recover_result */ uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */ uint32_t *ls_recover_result; /* result of last jid recovery */ }; struct gfs2_pcpu_lkstats { /* One struct for each glock type */ struct gfs2_lkstats lkstats[10]; }; /* List of local (per node) statfs inodes */ struct local_statfs_inode { struct list_head si_list; struct inode *si_sc_inode; unsigned int si_jid; /* journal id this statfs inode corresponds to */ }; struct gfs2_sbd { struct super_block *sd_vfs; struct gfs2_pcpu_lkstats __percpu *sd_lkstats; struct kobject sd_kobj; struct completion sd_kobj_unregister; unsigned long sd_flags; /* SDF_... 
*/ struct gfs2_sb_host sd_sb; /* Constants computed on mount */ u32 sd_fsb2bb; u32 sd_fsb2bb_shift; u32 sd_diptrs; /* Number of pointers in a dinode */ u32 sd_inptrs; /* Number of pointers in a indirect block */ u32 sd_ldptrs; /* Number of pointers in a log descriptor block */ u32 sd_jbsize; /* Size of a journaled data block */ u32 sd_hash_bsize; /* sizeof(exhash block) */ u32 sd_hash_bsize_shift; u32 sd_hash_ptrs; /* Number of pointers in a hash block */ u32 sd_qc_per_block; u32 sd_blocks_per_bitmap; u32 sd_max_dirres; /* Max blocks needed to add a directory entry */ u32 sd_max_height; /* Max height of a file's metadata tree */ u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1]; u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */ struct gfs2_args sd_args; /* Mount arguments */ struct gfs2_tune sd_tune; /* Filesystem tuning structure */ /* Lock Stuff */ struct lm_lockstruct sd_lockstruct; struct gfs2_holder sd_live_gh; struct gfs2_glock *sd_rename_gl; struct gfs2_glock *sd_freeze_gl; struct work_struct sd_freeze_work; wait_queue_head_t sd_kill_wait; wait_queue_head_t sd_async_glock_wait; atomic_t sd_glock_disposal; struct completion sd_locking_init; struct completion sd_wdack; struct delayed_work sd_control_work; /* Inode Stuff */ struct dentry *sd_master_dir; struct dentry *sd_root_dir; struct inode *sd_jindex; struct inode *sd_statfs_inode; struct inode *sd_sc_inode; struct list_head sd_sc_inodes_list; struct inode *sd_qc_inode; struct inode *sd_rindex; struct inode *sd_quota_inode; /* StatFS stuff */ spinlock_t sd_statfs_spin; struct gfs2_statfs_change_host sd_statfs_master; struct gfs2_statfs_change_host sd_statfs_local; int sd_statfs_force_sync; /* Resource group stuff */ int sd_rindex_uptodate; spinlock_t sd_rindex_spin; struct rb_root sd_rindex_tree; unsigned int sd_rgrps; unsigned int sd_max_rg_data; /* Journal index stuff */ struct list_head sd_jindex_list; spinlock_t sd_jindex_spin; struct mutex sd_jindex_mutex; unsigned int sd_journals; struct gfs2_jdesc *sd_jdesc; struct gfs2_holder sd_journal_gh; struct gfs2_holder sd_jinode_gh; struct gfs2_glock *sd_jinode_gl; struct gfs2_holder sd_sc_gh; struct buffer_head *sd_sc_bh; struct gfs2_holder sd_qc_gh; struct completion sd_journal_ready; /* Workqueue stuff */ struct workqueue_struct *sd_delete_wq; /* Daemon stuff */ struct task_struct *sd_logd_process; struct task_struct *sd_quotad_process; /* Quota stuff */ struct list_head sd_quota_list; atomic_t sd_quota_count; struct mutex sd_quota_mutex; struct mutex sd_quota_sync_mutex; wait_queue_head_t sd_quota_wait; unsigned int sd_quota_slots; unsigned long *sd_quota_bitmap; spinlock_t sd_bitmap_lock; u64 sd_quota_sync_gen; /* Log stuff */ struct address_space sd_aspace; spinlock_t sd_log_lock; struct gfs2_trans *sd_log_tr; unsigned int sd_log_blks_reserved; atomic_t sd_log_pinned; unsigned int sd_log_num_revoke; struct list_head sd_log_revokes; struct list_head sd_log_ordered; spinlock_t sd_ordered_lock; atomic_t sd_log_thresh1; atomic_t sd_log_thresh2; atomic_t sd_log_blks_free; atomic_t sd_log_blks_needed; atomic_t sd_log_revokes_available; wait_queue_head_t sd_log_waitq; wait_queue_head_t sd_logd_waitq; u64 sd_log_sequence; int sd_log_idle; struct rw_semaphore sd_log_flush_lock; atomic_t sd_log_in_flight; wait_queue_head_t sd_log_flush_wait; int sd_log_error; /* First log error */ wait_queue_head_t sd_withdraw_wait; unsigned int sd_log_tail; unsigned int sd_log_flush_tail; unsigned int sd_log_head; unsigned int sd_log_flush_head; spinlock_t sd_ail_lock; struct list_head 
sd_ail1_list; struct list_head sd_ail2_list; /* For quiescing the filesystem */ struct gfs2_holder sd_freeze_gh; struct mutex sd_freeze_mutex; char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2]; char sd_table_name[GFS2_FSNAME_LEN]; char sd_proto_name[GFS2_FSNAME_LEN]; /* Debugging crud */ unsigned long sd_last_warning; struct dentry *debugfs_dir; /* debugfs directory */ unsigned long sd_glock_dqs_held; }; static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) { gl->gl_stats.stats[which]++; } static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which) { const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; preempt_disable(); this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++; preempt_enable(); } struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl); static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip) { return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); } #endif /* __INCORE_DOT_H__ */ |
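/*
 * Illustrative sketch (not part of incore.h): the GFS2_I() helper above relies
 * on the container_of idiom -- because the VFS inode is embedded in
 * struct gfs2_inode, a pointer to the embedded member can be mapped back to
 * the containing structure with plain pointer arithmetic.  The stand-alone
 * example below uses hypothetical types (struct outer, OUTER_OF) purely to
 * show the same pattern outside the kernel.
 */
#include <stddef.h>
#include <stdio.h>

struct outer {
	int embedded;		/* plays the role of i_inode */
	long private_state;	/* plays the role of the gfs2-specific fields */
};

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct outer *OUTER_OF(int *member_ptr)
{
	return container_of_sketch(member_ptr, struct outer, embedded);
}

int main(void)
{
	struct outer o = { .embedded = 1, .private_state = 42 };
	int *inner = &o.embedded;

	/* Recover the containing object from a pointer to the embedded member. */
	printf("%ld\n", OUTER_OF(inner)->private_state);	/* prints 42 */
	return 0;
}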
890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 | // SPDX-License-Identifier: GPL-2.0-only /* * kallsyms.c: in-kernel printing of symbolic oopses and stack traces. * * Rewritten and vastly simplified by Rusty Russell for in-kernel * module loader: * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * ChangeLog: * * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com> * Changed the compression method from stem compression to "table lookup" * compression (see scripts/kallsyms.c for a more complete description) */ #include <linux/kallsyms.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/fs.h> #include <linux/kdb.h> #include <linux/err.h> #include <linux/proc_fs.h> #include <linux/sched.h> /* for cond_resched */ #include <linux/ctype.h> #include <linux/slab.h> #include <linux/filter.h> #include <linux/ftrace.h> #include <linux/kprobes.h> #include <linux/build_bug.h> #include <linux/compiler.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bsearch.h> #include <linux/btf_ids.h> #include "kallsyms_internal.h" /* * Expand a compressed symbol data into the resulting uncompressed string, * if uncompressed string is too long (>= maxlen), it will be truncated, * given the offset to where the symbol is in the compressed stream. */ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result, size_t maxlen) { int len, skipped_first = 0; const char *tptr; const u8 *data; /* Get the compressed symbol length from the first symbol byte. */ data = &kallsyms_names[off]; len = *data; data++; off++; /* If MSB is 1, it is a "big" symbol, so needs an additional byte. */ if ((len & 0x80) != 0) { len = (len & 0x7F) | (*data << 7); data++; off++; } /* * Update the offset to return the offset for the next symbol on * the compressed stream. */ off += len; /* * For every byte on the compressed symbol data, copy the table * entry for that byte. */ while (len) { tptr = &kallsyms_token_table[kallsyms_token_index[*data]]; data++; len--; while (*tptr) { if (skipped_first) { if (maxlen <= 1) goto tail; *result = *tptr; result++; maxlen--; } else skipped_first = 1; tptr++; } } tail: if (maxlen) *result = '\0'; /* Return to offset to the next symbol. */ return off; } /* * Get symbol type information. This is encoded as a single char at the * beginning of the symbol name. */ static char kallsyms_get_symbol_type(unsigned int off) { /* * Get just the first code, look it up in the token table, * and return the first char from this token. */ return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]]; } /* * Find the offset on the compressed stream given and index in the * kallsyms array. */ static unsigned int get_symbol_offset(unsigned long pos) { const u8 *name; int i, len; /* * Use the closest marker we have. We have markers every 256 positions, * so that should be close enough. */ name = &kallsyms_names[kallsyms_markers[pos >> 8]]; /* * Sequentially scan all the symbols up to the point we're searching * for. Every symbol is stored in a [<len>][<len> bytes of data] format, * so we just need to add the len to the current pointer for every * symbol we wish to skip. */ for (i = 0; i < (pos & 0xFF); i++) { len = *name; /* * If MSB is 1, it is a "big" symbol, so we need to look into * the next byte (and skip it, too). 
*/ if ((len & 0x80) != 0) len = ((len & 0x7F) | (name[1] << 7)) + 1; name = name + len + 1; } return name - kallsyms_names; } unsigned long kallsyms_sym_address(int idx) { if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE)) return kallsyms_addresses[idx]; /* values are unsigned offsets if --absolute-percpu is not in effect */ if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU)) return kallsyms_relative_base + (u32)kallsyms_offsets[idx]; /* ...otherwise, positive offsets are absolute values */ if (kallsyms_offsets[idx] >= 0) return kallsyms_offsets[idx]; /* ...and negative offsets are relative to kallsyms_relative_base - 1 */ return kallsyms_relative_base - 1 - kallsyms_offsets[idx]; } static void cleanup_symbol_name(char *s) { char *res; if (!IS_ENABLED(CONFIG_LTO_CLANG)) return; /* * LLVM appends various suffixes for local functions and variables that * must be promoted to global scope as part of LTO. This can break * hooking of static functions with kprobes. '.' is not a valid * character in an identifier in C. Suffixes only in LLVM LTO observed: * - foo.llvm.[0-9a-f]+ */ res = strstr(s, ".llvm."); if (res) *res = '\0'; return; } static int compare_symbol_name(const char *name, char *namebuf) { /* The kallsyms_seqs_of_names is sorted based on names after * cleanup_symbol_name() (see scripts/kallsyms.c) if clang lto is enabled. * To ensure correct bisection in kallsyms_lookup_names(), do * cleanup_symbol_name(namebuf) before comparing name and namebuf. */ cleanup_symbol_name(namebuf); return strcmp(name, namebuf); } static unsigned int get_symbol_seq(int index) { unsigned int i, seq = 0; for (i = 0; i < 3; i++) seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i]; return seq; } static int kallsyms_lookup_names(const char *name, unsigned int *start, unsigned int *end) { int ret; int low, mid, high; unsigned int seq, off; char namebuf[KSYM_NAME_LEN]; low = 0; high = kallsyms_num_syms - 1; while (low <= high) { mid = low + (high - low) / 2; seq = get_symbol_seq(mid); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); ret = compare_symbol_name(name, namebuf); if (ret > 0) low = mid + 1; else if (ret < 0) high = mid - 1; else break; } if (low > high) return -ESRCH; low = mid; while (low) { seq = get_symbol_seq(low - 1); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); if (compare_symbol_name(name, namebuf)) break; low--; } *start = low; if (end) { high = mid; while (high < kallsyms_num_syms - 1) { seq = get_symbol_seq(high + 1); off = get_symbol_offset(seq); kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); if (compare_symbol_name(name, namebuf)) break; high++; } *end = high; } return 0; } /* Lookup the address for this symbol. Returns 0 if not found. */ unsigned long kallsyms_lookup_name(const char *name) { int ret; unsigned int i; /* Skip the search for empty string. */ if (!*name) return 0; ret = kallsyms_lookup_names(name, &i, NULL); if (!ret) return kallsyms_sym_address(get_symbol_seq(i)); return module_kallsyms_lookup_name(name); } /* * Iterate over all symbols in vmlinux. For symbols from modules use * module_kallsyms_on_each_symbol instead. 
*/ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long), void *data) { char namebuf[KSYM_NAME_LEN]; unsigned long i; unsigned int off; int ret; for (i = 0, off = 0; i < kallsyms_num_syms; i++) { off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf)); ret = fn(data, namebuf, kallsyms_sym_address(i)); if (ret != 0) return ret; cond_resched(); } return 0; } int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long), const char *name, void *data) { int ret; unsigned int i, start, end; ret = kallsyms_lookup_names(name, &start, &end); if (ret) return 0; for (i = start; !ret && i <= end; i++) { ret = fn(data, kallsyms_sym_address(get_symbol_seq(i))); cond_resched(); } return ret; } static unsigned long get_symbol_pos(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { unsigned long symbol_start = 0, symbol_end = 0; unsigned long i, low, high, mid; /* This kernel should never had been booted. */ if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE)) BUG_ON(!kallsyms_addresses); else BUG_ON(!kallsyms_offsets); /* Do a binary search on the sorted kallsyms_addresses array. */ low = 0; high = kallsyms_num_syms; while (high - low > 1) { mid = low + (high - low) / 2; if (kallsyms_sym_address(mid) <= addr) low = mid; else high = mid; } /* * Search for the first aliased symbol. Aliased * symbols are symbols with the same address. */ while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low)) --low; symbol_start = kallsyms_sym_address(low); /* Search for next non-aliased symbol. */ for (i = low + 1; i < kallsyms_num_syms; i++) { if (kallsyms_sym_address(i) > symbol_start) { symbol_end = kallsyms_sym_address(i); break; } } /* If we found no next symbol, we use the end of the section. */ if (!symbol_end) { if (is_kernel_inittext(addr)) symbol_end = (unsigned long)_einittext; else if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) symbol_end = (unsigned long)_end; else symbol_end = (unsigned long)_etext; } if (symbolsize) *symbolsize = symbol_end - symbol_start; if (offset) *offset = addr - symbol_start; return low; } /* * Lookup an address but don't bother to find any names. */ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, unsigned long *offset) { char namebuf[KSYM_NAME_LEN]; if (is_ksym_addr(addr)) { get_symbol_pos(addr, symbolsize, offset); return 1; } return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) || !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); } static const char *kallsyms_lookup_buildid(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, const unsigned char **modbuildid, char *namebuf) { const char *ret; namebuf[KSYM_NAME_LEN - 1] = 0; namebuf[0] = 0; if (is_ksym_addr(addr)) { unsigned long pos; pos = get_symbol_pos(addr, symbolsize, offset); /* Grab name */ kallsyms_expand_symbol(get_symbol_offset(pos), namebuf, KSYM_NAME_LEN); if (modname) *modname = NULL; if (modbuildid) *modbuildid = NULL; ret = namebuf; goto found; } /* See if it's in a module or a BPF JITed image. */ ret = module_address_lookup(addr, symbolsize, offset, modname, modbuildid, namebuf); if (!ret) ret = bpf_address_lookup(addr, symbolsize, offset, modname, namebuf); if (!ret) ret = ftrace_mod_address_lookup(addr, symbolsize, offset, modname, namebuf); found: cleanup_symbol_name(namebuf); return ret; } /* * Lookup an address * - modname is set to NULL if it's in the kernel. * - We guarantee that the returned name is valid until we reschedule even if. * It resides in a module. 
* - We also guarantee that modname will be valid until rescheduled. */ const char *kallsyms_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, char *namebuf) { return kallsyms_lookup_buildid(addr, symbolsize, offset, modname, NULL, namebuf); } int lookup_symbol_name(unsigned long addr, char *symname) { int res; symname[0] = '\0'; symname[KSYM_NAME_LEN - 1] = '\0'; if (is_ksym_addr(addr)) { unsigned long pos; pos = get_symbol_pos(addr, NULL, NULL); /* Grab name */ kallsyms_expand_symbol(get_symbol_offset(pos), symname, KSYM_NAME_LEN); goto found; } /* See if it's in a module. */ res = lookup_module_symbol_name(addr, symname); if (res) return res; found: cleanup_symbol_name(symname); return 0; } /* Look up a kernel symbol and return it in a text buffer. */ static int __sprint_symbol(char *buffer, unsigned long address, int symbol_offset, int add_offset, int add_buildid) { char *modname; const unsigned char *buildid; const char *name; unsigned long offset, size; int len; address += symbol_offset; name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid, buffer); if (!name) return sprintf(buffer, "0x%lx", address - symbol_offset); if (name != buffer) strcpy(buffer, name); len = strlen(buffer); offset -= symbol_offset; if (add_offset) len += sprintf(buffer + len, "+%#lx/%#lx", offset, size); if (modname) { len += sprintf(buffer + len, " [%s", modname); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) if (add_buildid && buildid) { /* build ID should match length of sprintf */ #if IS_ENABLED(CONFIG_MODULES) static_assert(sizeof(typeof_member(struct module, build_id)) == 20); #endif len += sprintf(buffer + len, " %20phN", buildid); } #endif len += sprintf(buffer + len, "]"); } return len; } /** * sprint_symbol - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name, * offset, size and module name to @buffer if possible. If no symbol was found, * just saves its @address as is. * * This function returns the number of bytes stored in @buffer. */ int sprint_symbol(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 1, 0); } EXPORT_SYMBOL_GPL(sprint_symbol); /** * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name, * offset, size, module name and module build ID to @buffer if possible. If no * symbol was found, just saves its @address as is. * * This function returns the number of bytes stored in @buffer. */ int sprint_symbol_build_id(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 1, 1); } EXPORT_SYMBOL_GPL(sprint_symbol_build_id); /** * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function looks up a kernel symbol with @address and stores its name * and module name to @buffer if possible. If no symbol was found, just saves * its @address as is. * * This function returns the number of bytes stored in @buffer. 
*/ int sprint_symbol_no_offset(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, 0, 0, 0); } EXPORT_SYMBOL_GPL(sprint_symbol_no_offset); /** * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function is for stack backtrace and does the same thing as * sprint_symbol() but with modified/decreased @address. If there is a * tail-call to the function marked "noreturn", gcc optimized out code after * the call so that the stack-saved return address could point outside of the * caller. This function ensures that kallsyms will find the original caller * by decreasing @address. * * This function returns the number of bytes stored in @buffer. */ int sprint_backtrace(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, -1, 1, 0); } /** * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer * @buffer: buffer to be stored * @address: address to lookup * * This function is for stack backtrace and does the same thing as * sprint_symbol() but with modified/decreased @address. If there is a * tail-call to the function marked "noreturn", gcc optimized out code after * the call so that the stack-saved return address could point outside of the * caller. This function ensures that kallsyms will find the original caller * by decreasing @address. This function also appends the module build ID to * the @buffer if @address is within a kernel module. * * This function returns the number of bytes stored in @buffer. */ int sprint_backtrace_build_id(char *buffer, unsigned long address) { return __sprint_symbol(buffer, address, -1, 1, 1); } /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ struct kallsym_iter { loff_t pos; loff_t pos_mod_end; loff_t pos_ftrace_mod_end; loff_t pos_bpf_end; unsigned long value; unsigned int nameoff; /* If iterating in core kernel symbols. */ char type; char name[KSYM_NAME_LEN]; char module_name[MODULE_NAME_LEN]; int exported; int show_value; }; static int get_ksymbol_mod(struct kallsym_iter *iter) { int ret = module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value, &iter->type, iter->name, iter->module_name, &iter->exported); if (ret < 0) { iter->pos_mod_end = iter->pos; return 0; } return 1; } /* * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace * purposes. In that case "__builtin__ftrace" is used as a module name, even * though "__builtin__ftrace" is not a module. */ static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter) { int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end, &iter->value, &iter->type, iter->name, iter->module_name, &iter->exported); if (ret < 0) { iter->pos_ftrace_mod_end = iter->pos; return 0; } return 1; } static int get_ksymbol_bpf(struct kallsym_iter *iter) { int ret; strscpy(iter->module_name, "bpf", MODULE_NAME_LEN); iter->exported = 0; ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end, &iter->value, &iter->type, iter->name); if (ret < 0) { iter->pos_bpf_end = iter->pos; return 0; } return 1; } /* * This uses "__builtin__kprobes" as a module name for symbols for pages * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a * module. 
*/ static int get_ksymbol_kprobe(struct kallsym_iter *iter) { strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN); iter->exported = 0; return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end, &iter->value, &iter->type, iter->name) < 0 ? 0 : 1; } /* Returns space to next name. */ static unsigned long get_ksymbol_core(struct kallsym_iter *iter) { unsigned off = iter->nameoff; iter->module_name[0] = '\0'; iter->value = kallsyms_sym_address(iter->pos); iter->type = kallsyms_get_symbol_type(off); off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name)); return off - iter->nameoff; } static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) { iter->name[0] = '\0'; iter->nameoff = get_symbol_offset(new_pos); iter->pos = new_pos; if (new_pos == 0) { iter->pos_mod_end = 0; iter->pos_ftrace_mod_end = 0; iter->pos_bpf_end = 0; } } /* * The end position (last + 1) of each additional kallsyms section is recorded * in iter->pos_..._end as each section is added, and so can be used to * determine which get_ksymbol_...() function to call next. */ static int update_iter_mod(struct kallsym_iter *iter, loff_t pos) { iter->pos = pos; if ((!iter->pos_mod_end || iter->pos_mod_end > pos) && get_ksymbol_mod(iter)) return 1; if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) && get_ksymbol_ftrace_mod(iter)) return 1; if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) && get_ksymbol_bpf(iter)) return 1; return get_ksymbol_kprobe(iter); } /* Returns false if pos at or past end of file. */ static int update_iter(struct kallsym_iter *iter, loff_t pos) { /* Module symbols can be accessed randomly. */ if (pos >= kallsyms_num_syms) return update_iter_mod(iter, pos); /* If we're not on the desired position, reset to new position. */ if (pos != iter->pos) reset_iter(iter, pos); iter->nameoff += get_ksymbol_core(iter); iter->pos++; return 1; } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { (*pos)++; if (!update_iter(m->private, *pos)) return NULL; return p; } static void *s_start(struct seq_file *m, loff_t *pos) { if (!update_iter(m->private, *pos)) return NULL; return m->private; } static void s_stop(struct seq_file *m, void *p) { } static int s_show(struct seq_file *m, void *p) { void *value; struct kallsym_iter *iter = m->private; /* Some debugging symbols have no name. Ignore them. */ if (!iter->name[0]) return 0; value = iter->show_value ? (void *)iter->value : NULL; if (iter->module_name[0]) { char type; /* * Label it "global" if it is exported, * "local" if not exported. */ type = iter->exported ? toupper(iter->type) : tolower(iter->type); seq_printf(m, "%px %c %s\t[%s]\n", value, type, iter->name, iter->module_name); } else seq_printf(m, "%px %c %s\n", value, iter->type, iter->name); return 0; } static const struct seq_operations kallsyms_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show }; #ifdef CONFIG_BPF_SYSCALL struct bpf_iter__ksym { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct kallsym_iter *, ksym); }; static int ksym_prog_seq_show(struct seq_file *m, bool in_stop) { struct bpf_iter__ksym ctx; struct bpf_iter_meta meta; struct bpf_prog *prog; meta.seq = m; prog = bpf_iter_get_info(&meta, in_stop); if (!prog) return 0; ctx.meta = &meta; ctx.ksym = m ? 
m->private : NULL; return bpf_iter_run_prog(prog, &ctx); } static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p) { return ksym_prog_seq_show(m, false); } static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p) { if (!p) (void) ksym_prog_seq_show(m, true); else s_stop(m, p); } static const struct seq_operations bpf_iter_ksym_ops = { .start = s_start, .next = s_next, .stop = bpf_iter_ksym_seq_stop, .show = bpf_iter_ksym_seq_show, }; static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux) { struct kallsym_iter *iter = priv_data; reset_iter(iter, 0); /* cache here as in kallsyms_open() case; use current process * credentials to tell BPF iterators if values should be shown. */ iter->show_value = kallsyms_show_value(current_cred()); return 0; } DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym) static const struct bpf_iter_seq_info ksym_iter_seq_info = { .seq_ops = &bpf_iter_ksym_ops, .init_seq_private = bpf_iter_ksym_init, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct kallsym_iter), }; static struct bpf_iter_reg ksym_iter_reg_info = { .target = "ksym", .feature = BPF_ITER_RESCHED, .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__ksym, ksym), PTR_TO_BTF_ID_OR_NULL }, }, .seq_info = &ksym_iter_seq_info, }; BTF_ID_LIST(btf_ksym_iter_id) BTF_ID(struct, kallsym_iter) static int __init bpf_ksym_iter_register(void) { ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id; return bpf_iter_reg_target(&ksym_iter_reg_info); } late_initcall(bpf_ksym_iter_register); #endif /* CONFIG_BPF_SYSCALL */ static int kallsyms_open(struct inode *inode, struct file *file) { /* * We keep iterator in m->private, since normal case is to * s_start from where we left off, so we avoid doing * using get_symbol_offset for every symbol. */ struct kallsym_iter *iter; iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter)); if (!iter) return -ENOMEM; reset_iter(iter, 0); /* * Instead of checking this on every s_show() call, cache * the result here at open time. */ iter->show_value = kallsyms_show_value(file->f_cred); return 0; } #ifdef CONFIG_KGDB_KDB const char *kdb_walk_kallsyms(loff_t *pos) { static struct kallsym_iter kdb_walk_kallsyms_iter; if (*pos == 0) { memset(&kdb_walk_kallsyms_iter, 0, sizeof(kdb_walk_kallsyms_iter)); reset_iter(&kdb_walk_kallsyms_iter, 0); } while (1) { if (!update_iter(&kdb_walk_kallsyms_iter, *pos)) return NULL; ++*pos; /* Some debugging symbols have no name. Ignore them. */ if (kdb_walk_kallsyms_iter.name[0]) return kdb_walk_kallsyms_iter.name; } } #endif /* CONFIG_KGDB_KDB */ static const struct proc_ops kallsyms_proc_ops = { .proc_open = kallsyms_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release_private, }; static int __init kallsyms_init(void) { proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops); return 0; } device_initcall(kallsyms_init); |
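/*
 * Illustrative sketch (not part of kallsyms.c): the expansion and skipping
 * code above reads a one- or two-byte length prefix in front of every
 * compressed symbol.  If the MSB of the first byte is set, the low seven bits
 * are combined with the following byte to form the full length.  The
 * hypothetical helper below decodes such a prefix from a plain buffer and
 * returns the number of prefix bytes consumed, mirroring the logic in
 * kallsyms_expand_symbol() and get_symbol_offset().
 */
#include <stddef.h>

static size_t decode_len_prefix(const unsigned char *p, unsigned int *len_out)
{
	unsigned int len = p[0];

	if (len & 0x80) {
		/* "big" symbol: low 7 bits plus the next byte shifted left by 7 */
		*len_out = (len & 0x7F) | ((unsigned int)p[1] << 7);
		return 2;
	}
	*len_out = len;
	return 1;
}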
2 2 2 1 2 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 | // SPDX-License-Identifier: GPL-2.0-only /* * pcrypt - Parallel crypto wrapper. * * Copyright (C) 2009 secunet Security Networks AG * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> */ #include <crypto/algapi.h> #include <crypto/internal/aead.h> #include <linux/atomic.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kobject.h> #include <linux/cpu.h> #include <crypto/pcrypt.h> static struct padata_instance *pencrypt; static struct padata_instance *pdecrypt; static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { struct crypto_aead_spawn spawn; struct padata_shell *psenc; struct padata_shell *psdec; atomic_t tfm_count; }; struct pcrypt_aead_ctx { struct crypto_aead *child; unsigned int cb_cpu; }; static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx( struct crypto_aead *tfm) { return aead_instance_ctx(aead_alg_instance(tfm)); } static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setkey(ctx->child, key, keylen); } static int pcrypt_aead_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setauthsize(ctx->child, authsize); } static void pcrypt_aead_serial(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); aead_request_complete(req->base.data, padata->info); } static void pcrypt_aead_done(void *data, int err) { struct aead_request *req = data; struct pcrypt_request *preq = aead_request_ctx(req); struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; padata_do_serial(padata); } static void pcrypt_aead_enc(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); int ret; ret = crypto_aead_encrypt(req); if (ret == -EINPROGRESS) return; padata->info = ret; 
padata_do_serial(padata); } static int pcrypt_aead_encrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); struct pcrypt_instance_ctx *ictx; ictx = pcrypt_tfm_ictx(aead); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_enc; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; if (err == -EBUSY) return -EAGAIN; return err; } static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); int ret; ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) return; padata->info = ret; padata_do_serial(padata); } static int pcrypt_aead_decrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); struct pcrypt_instance_ctx *ictx; ictx = pcrypt_tfm_ictx(aead); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_dec; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; if (err == -EBUSY) return -EAGAIN; return err; } static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) { int cpu, cpu_index; struct aead_instance *inst = aead_alg_instance(tfm); struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *cipher; cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % cpumask_weight(cpu_online_mask); ctx->cb_cpu = cpumask_first(cpu_online_mask); for (cpu = 0; cpu < cpu_index; cpu++) ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); cipher = crypto_spawn_aead(&ictx->spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) + sizeof(struct aead_request) + crypto_aead_reqsize(cipher)); return 0; } static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static void pcrypt_free(struct aead_instance *inst) { struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); padata_free_shell(ctx->psdec); padata_free_shell(ctx->psenc); kfree(inst); } static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return 
-ENAMETOOLONG; memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_priority = alg->cra_priority + 100; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; return 0; } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, struct crypto_attr_type *algt) { struct pcrypt_instance_ctx *ctx; struct aead_instance *inst; struct aead_alg *alg; u32 mask = crypto_algt_inherited_mask(algt); int err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; err = -ENOMEM; ctx = aead_instance_ctx(inst); ctx->psenc = padata_alloc_shell(pencrypt); if (!ctx->psenc) goto err_free_inst; ctx->psdec = padata_alloc_shell(pdecrypt); if (!ctx->psdec) goto err_free_inst; err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(&ctx->spawn); err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base); if (err) goto err_free_inst; inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); inst->alg.init = pcrypt_aead_init_tfm; inst->alg.exit = pcrypt_aead_exit_tfm; inst->alg.setkey = pcrypt_aead_setkey; inst->alg.setauthsize = pcrypt_aead_setauthsize; inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; inst->free = pcrypt_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: pcrypt_free(inst); } return err; } static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: return pcrypt_create_aead(tmpl, tb, algt); } return -EINVAL; } static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) { int ret; pinst->kobj.kset = pcrypt_kset; ret = kobject_add(&pinst->kobj, NULL, "%s", name); if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); return ret; } static int pcrypt_init_padata(struct padata_instance **pinst, const char *name) { int ret = -ENOMEM; *pinst = padata_alloc(name); if (!*pinst) return ret; ret = pcrypt_sysfs_add(*pinst, name); if (ret) padata_free(*pinst); return ret; } static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, .module = THIS_MODULE, }; static int __init pcrypt_init(void) { int err = -ENOMEM; pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); if (!pcrypt_kset) goto err; err = pcrypt_init_padata(&pencrypt, "pencrypt"); if (err) goto err_unreg_kset; err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); if (err) goto err_deinit_pencrypt; return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: padata_free(pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: return err; } static void __exit pcrypt_exit(void) { crypto_unregister_template(&pcrypt_tmpl); padata_free(pencrypt); padata_free(pdecrypt); kset_unregister(pcrypt_kset); } subsys_initcall(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Parallel crypto wrapper"); MODULE_ALIAS_CRYPTO("pcrypt"); |
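/*
 * Illustrative usage sketch (not part of pcrypt.c): pcrypt is a crypto
 * template, so it is instantiated by wrapping the name of an existing AEAD
 * algorithm, e.g. "pcrypt(gcm(aes))".  A kernel user would request such a
 * transform roughly as below; error handling and the subsequent AEAD request
 * setup follow the normal crypto_aead API and are omitted here.
 */
#include <crypto/aead.h>

static struct crypto_aead *pcrypt_example_alloc(void)
{
	/* Ask the crypto API for gcm(aes) wrapped in the pcrypt template. */
	return crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
}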
7 2 6 1 5 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 | // SPDX-License-Identifier: GPL-2.0-only /* * xt_HMARK - Netfilter module to set mark by means of hashing * * (C) 2012 by Hans Schillstrom <hans.schillstrom@ericsson.com> * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/icmp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_HMARK.h> #include <net/ip.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <net/netfilter/nf_conntrack.h> #endif #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) #include <net/ipv6.h> #include <linux/netfilter_ipv6/ip6_tables.h> #endif MODULE_LICENSE("GPL"); MODULE_AUTHOR("Hans Schillstrom <hans.schillstrom@ericsson.com>"); MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); MODULE_ALIAS("ipt_HMARK"); MODULE_ALIAS("ip6t_HMARK"); struct hmark_tuple { __be32 src; __be32 dst; union hmark_ports uports; u8 proto; }; static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask) { return (addr32[0] & mask[0]) ^ (addr32[1] & mask[1]) ^ (addr32[2] & mask[2]) ^ (addr32[3] & mask[3]); } static inline __be32 hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask) { switch (l3num) { case AF_INET: return *addr32 & *mask; case AF_INET6: return hmark_addr6_mask(addr32, mask); } return 0; } static inline void hmark_swap_ports(union hmark_ports *uports, const struct xt_hmark_info *info) { union hmark_ports hp; u16 src, dst; hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32; src = ntohs(hp.b16.src); dst = ntohs(hp.b16.dst); if (dst > src) uports->v32 = (dst << 16) | src; else uports->v32 = (src << 16) | dst; } static int hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, const struct xt_hmark_info *info) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); struct nf_conntrack_tuple *otuple; struct nf_conntrack_tuple *rtuple; if (ct == NULL) return -1; otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6, 
info->src_mask.ip6); t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6, info->dst_mask.ip6); if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) return 0; t->proto = nf_ct_protonum(ct); if (t->proto != IPPROTO_ICMP) { t->uports.b16.src = otuple->src.u.all; t->uports.b16.dst = rtuple->src.u.all; hmark_swap_ports(&t->uports, info); } return 0; #else return -1; #endif } /* This hash function is endian independent, to ensure consistent hashing if * the cluster is composed of big and little endian systems. */ static inline u32 hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) { u32 hash; u32 src = ntohl(t->src); u32 dst = ntohl(t->dst); if (dst < src) swap(src, dst); hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); hash = hash ^ (t->proto & info->proto_mask); return reciprocal_scale(hash, info->hmodulus) + info->hoffset; } static void hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff, struct hmark_tuple *t, const struct xt_hmark_info *info) { int protoff; protoff = proto_ports_offset(t->proto); if (protoff < 0) return; nhoff += protoff; if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) return; hmark_swap_ports(&t->uports, info); } #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static int get_inner6_hdr(const struct sk_buff *skb, int *offset) { struct icmp6hdr *icmp6h, _ih6; icmp6h = skb_header_pointer(skb, *offset, sizeof(_ih6), &_ih6); if (icmp6h == NULL) return 0; if (icmp6h->icmp6_type && icmp6h->icmp6_type < 128) { *offset += sizeof(struct icmp6hdr); return 1; } return 0; } static int hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t, const struct xt_hmark_info *info) { struct ipv6hdr *ip6, _ip6; int flag = IP6_FH_F_AUTH; unsigned int nhoff = 0; u16 fragoff = 0; int nexthdr; ip6 = (struct ipv6hdr *) (skb->data + skb_network_offset(skb)); nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); if (nexthdr < 0) return 0; /* No need to check for icmp errors on fragments */ if ((flag & IP6_FH_F_FRAG) || (nexthdr != IPPROTO_ICMPV6)) goto noicmp; /* Use inner header in case of ICMP errors */ if (get_inner6_hdr(skb, &nhoff)) { ip6 = skb_header_pointer(skb, nhoff, sizeof(_ip6), &_ip6); if (ip6 == NULL) return -1; /* If AH present, use SPI like in ESP. */ flag = IP6_FH_F_AUTH; nexthdr = ipv6_find_hdr(skb, &nhoff, -1, &fragoff, &flag); if (nexthdr < 0) return -1; } noicmp: t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6); t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6); if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) return 0; t->proto = nexthdr; if (t->proto == IPPROTO_ICMPV6) return 0; if (flag & IP6_FH_F_FRAG) return 0; hmark_set_tuple_ports(skb, nhoff, t, info); return 0; } static unsigned int hmark_tg_v6(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_hmark_info *info = par->targinfo; struct hmark_tuple t; memset(&t, 0, sizeof(struct hmark_tuple)); if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { if (hmark_ct_set_htuple(skb, &t, info) < 0) return XT_CONTINUE; } else { if (hmark_pkt_set_htuple_ipv6(skb, &t, info) < 0) return XT_CONTINUE; } skb->mark = hmark_hash(&t, info); return XT_CONTINUE; } #endif static int get_inner_hdr(const struct sk_buff *skb, int iphsz, int *nhoff) { const struct icmphdr *icmph; struct icmphdr _ih; /* Not enough header? */ icmph = skb_header_pointer(skb, *nhoff + iphsz, sizeof(_ih), &_ih); if (icmph == NULL || icmph->type > NR_ICMP_TYPES) return 0; /* Error message? 
*/ if (!icmp_is_err(icmph->type)) return 0; *nhoff += iphsz + sizeof(_ih); return 1; } static int hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t, const struct xt_hmark_info *info) { struct iphdr *ip, _ip; int nhoff = skb_network_offset(skb); ip = (struct iphdr *) (skb->data + nhoff); if (ip->protocol == IPPROTO_ICMP) { /* Use inner header in case of ICMP errors */ if (get_inner_hdr(skb, ip->ihl * 4, &nhoff)) { ip = skb_header_pointer(skb, nhoff, sizeof(_ip), &_ip); if (ip == NULL) return -1; } } t->src = ip->saddr & info->src_mask.ip; t->dst = ip->daddr & info->dst_mask.ip; if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) return 0; t->proto = ip->protocol; /* ICMP has no ports, skip */ if (t->proto == IPPROTO_ICMP) return 0; /* follow-up fragments don't contain ports, skip all fragments */ if (ip_is_fragment(ip)) return 0; hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info); return 0; } static unsigned int hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_hmark_info *info = par->targinfo; struct hmark_tuple t; memset(&t, 0, sizeof(struct hmark_tuple)); if (info->flags & XT_HMARK_FLAG(XT_HMARK_CT)) { if (hmark_ct_set_htuple(skb, &t, info) < 0) return XT_CONTINUE; } else { if (hmark_pkt_set_htuple_ipv4(skb, &t, info) < 0) return XT_CONTINUE; } skb->mark = hmark_hash(&t, info); return XT_CONTINUE; } static int hmark_tg_check(const struct xt_tgchk_param *par) { const struct xt_hmark_info *info = par->targinfo; const char *errmsg = "proto mask must be zero with L3 mode"; if (!info->hmodulus) return -EINVAL; if (info->proto_mask && (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) goto err; if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) && (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) | XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) return -EINVAL; if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) && (info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) | XT_HMARK_FLAG(XT_HMARK_DPORT)))) { errmsg = "spi-set and port-set can't be combined"; goto err; } return 0; err: pr_info_ratelimited("%s\n", errmsg); return -EINVAL; } static struct xt_target hmark_tg_reg[] __read_mostly = { { .name = "HMARK", .family = NFPROTO_IPV4, .target = hmark_tg_v4, .targetsize = sizeof(struct xt_hmark_info), .checkentry = hmark_tg_check, .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "HMARK", .family = NFPROTO_IPV6, .target = hmark_tg_v6, .targetsize = sizeof(struct xt_hmark_info), .checkentry = hmark_tg_check, .me = THIS_MODULE, }, #endif }; static int __init hmark_tg_init(void) { return xt_register_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); } static void __exit hmark_tg_exit(void) { xt_unregister_targets(hmark_tg_reg, ARRAY_SIZE(hmark_tg_reg)); } module_init(hmark_tg_init); module_exit(hmark_tg_exit); |
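/*
 * Illustrative sketch (not part of xt_HMARK.c above): a self-contained
 * userspace approximation of the direction-independent tuple hashing that
 * hmark_hash() and hmark_swap_ports() implement. Addresses and ports are put
 * into a canonical order before hashing, so both directions of a flow yield
 * the same mark, and the result is folded into [offset, offset + modulus).
 * mix3() is a hypothetical stand-in for the kernel's jhash_3words() and
 * reciprocal_scale() helpers, which are not available outside the kernel.
 */
#include <stdint.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	/* Hypothetical mixer; the module itself uses jhash_3words(). */
	uint32_t h = seed;

	h ^= a + 0x9e3779b9 + (h << 6) + (h >> 2);
	h ^= b + 0x9e3779b9 + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b9 + (h << 6) + (h >> 2);
	return h;
}

static uint32_t tuple_mark(uint32_t src, uint32_t dst, uint16_t sport,
			   uint16_t dport, uint8_t proto, uint32_t seed,
			   uint32_t modulus, uint32_t offset)
{
	uint32_t ports;

	/* Canonical ordering: original and reply direction hash identically. */
	if (dst < src) {
		uint32_t tmp = src;

		src = dst;
		dst = tmp;
	}
	ports = (dport > sport) ? ((uint32_t)dport << 16) | sport :
				  ((uint32_t)sport << 16) | dport;

	return ((mix3(src, dst, ports, seed) ^ proto) % modulus) + offset;
}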
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright(c) 2017 Intel Corporation. All rights reserved.
*/ #include <linux/pagemap.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/magic.h> #include <linux/pfn_t.h> #include <linux/cdev.h> #include <linux/slab.h> #include <linux/uio.h> #include <linux/dax.h> #include <linux/fs.h> #include "dax-private.h" /** * struct dax_device - anchor object for dax services * @inode: core vfs * @cdev: optional character interface for "device dax" * @private: dax driver private data * @flags: state and boolean properties * @ops: operations for this device * @holder_data: holder of a dax_device: could be filesystem or mapped device * @holder_ops: operations for the inner holder */ struct dax_device { struct inode inode; struct cdev cdev; void *private; unsigned long flags; const struct dax_operations *ops; void *holder_data; const struct dax_holder_operations *holder_ops; }; static dev_t dax_devt; DEFINE_STATIC_SRCU(dax_srcu); static struct vfsmount *dax_mnt; static DEFINE_IDA(dax_minor_ida); static struct kmem_cache *dax_cache __read_mostly; static struct super_block *dax_superblock __read_mostly; int dax_read_lock(void) { return srcu_read_lock(&dax_srcu); } EXPORT_SYMBOL_GPL(dax_read_lock); void dax_read_unlock(int id) { srcu_read_unlock(&dax_srcu, id); } EXPORT_SYMBOL_GPL(dax_read_unlock); #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) #include <linux/blkdev.h> static DEFINE_XARRAY(dax_hosts); int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL); } EXPORT_SYMBOL_GPL(dax_add_host); void dax_remove_host(struct gendisk *disk) { xa_erase(&dax_hosts, (unsigned long)disk); } EXPORT_SYMBOL_GPL(dax_remove_host); /** * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax * @bdev: block device to find a dax_device for * @start_off: returns the byte offset into the dax_device that @bdev starts * @holder: filesystem or mapped device inside the dax_device * @ops: operations for the inner holder */ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, void *holder, const struct dax_holder_operations *ops) { struct dax_device *dax_dev; u64 part_size; int id; if (!blk_queue_dax(bdev->bd_disk->queue)) return NULL; *start_off = get_start_sect(bdev) * SECTOR_SIZE; part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE; if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) { pr_info("%pg: error: unaligned partition for dax\n", bdev); return NULL; } id = dax_read_lock(); dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk); if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode)) dax_dev = NULL; else if (holder) { if (!cmpxchg(&dax_dev->holder_data, NULL, holder)) dax_dev->holder_ops = ops; else dax_dev = NULL; } dax_read_unlock(id); return dax_dev; } EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev); void fs_put_dax(struct dax_device *dax_dev, void *holder) { if (dax_dev && holder && cmpxchg(&dax_dev->holder_data, holder, NULL) == holder) dax_dev->holder_ops = NULL; put_dax(dax_dev); } EXPORT_SYMBOL_GPL(fs_put_dax); #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ enum dax_device_flags { /* !alive + rcu grace period == no new operations / mappings */ DAXDEV_ALIVE, /* gate whether dax_flush() calls the low level flush routine */ DAXDEV_WRITE_CACHE, /* flag to check if device supports synchronous flush */ DAXDEV_SYNC, /* do not leave the caches dirty after writes */ DAXDEV_NOCACHE, /* handle CPU fetch exceptions during reads */ DAXDEV_NOMC, }; /** * dax_direct_access() - translate a device pgoff to an 
absolute pfn * @dax_dev: a dax_device instance representing the logical memory range * @pgoff: offset in pages from the start of the device to translate * @nr_pages: number of consecutive pages caller can handle relative to @pfn * @mode: indicator on normal access or recovery write * @kaddr: output parameter that returns a virtual address mapping of pfn * @pfn: output parameter that returns an absolute pfn translation of @pgoff * * Return: negative errno if an error occurs, otherwise the number of * pages accessible at the device relative @pgoff. */ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, pfn_t *pfn) { long avail; if (!dax_dev) return -EOPNOTSUPP; if (!dax_alive(dax_dev)) return -ENXIO; if (nr_pages < 0) return -EINVAL; avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); if (!avail) return -ERANGE; return min(avail, nr_pages); } EXPORT_SYMBOL_GPL(dax_direct_access); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { if (!dax_alive(dax_dev)) return 0; /* * The userspace address for the memory copy has already been validated * via access_ok() in vfs_write, so use the 'no check' version to bypass * the HARDENED_USERCOPY overhead. */ if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags)) return _copy_from_iter_flushcache(addr, bytes, i); return _copy_from_iter(addr, bytes, i); } size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { if (!dax_alive(dax_dev)) return 0; /* * The userspace address for the memory copy has already been validated * via access_ok() in vfs_red, so use the 'no check' version to bypass * the HARDENED_USERCOPY overhead. */ if (test_bit(DAXDEV_NOMC, &dax_dev->flags)) return _copy_mc_to_iter(addr, bytes, i); return _copy_to_iter(addr, bytes, i); } int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages) { int ret; if (!dax_alive(dax_dev)) return -ENXIO; /* * There are no callers that want to zero more than one page as of now. * Once users are there, this check can be removed after the * device mapper code has been updated to split ranges across targets. 
*/ if (nr_pages != 1) return -EIO; ret = dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages); return dax_mem2blk_err(ret); } EXPORT_SYMBOL_GPL(dax_zero_page_range); size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *iter) { if (!dax_dev->ops->recovery_write) return 0; return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter); } EXPORT_SYMBOL_GPL(dax_recovery_write); int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len, int mf_flags) { int rc, id; id = dax_read_lock(); if (!dax_alive(dax_dev)) { rc = -ENXIO; goto out; } if (!dax_dev->holder_ops) { rc = -EOPNOTSUPP; goto out; } rc = dax_dev->holder_ops->notify_failure(dax_dev, off, len, mf_flags); out: dax_read_unlock(id); return rc; } EXPORT_SYMBOL_GPL(dax_holder_notify_failure); #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { if (unlikely(!dax_write_cache_enabled(dax_dev))) return; arch_wb_cache_pmem(addr, size); } #else void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) { } #endif EXPORT_SYMBOL_GPL(dax_flush); void dax_write_cache(struct dax_device *dax_dev, bool wc) { if (wc) set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); else clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_write_cache); bool dax_write_cache_enabled(struct dax_device *dax_dev) { return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_write_cache_enabled); bool dax_synchronous(struct dax_device *dax_dev) { return test_bit(DAXDEV_SYNC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_synchronous); void set_dax_synchronous(struct dax_device *dax_dev) { set_bit(DAXDEV_SYNC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_synchronous); void set_dax_nocache(struct dax_device *dax_dev) { set_bit(DAXDEV_NOCACHE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_nocache); void set_dax_nomc(struct dax_device *dax_dev) { set_bit(DAXDEV_NOMC, &dax_dev->flags); } EXPORT_SYMBOL_GPL(set_dax_nomc); bool dax_alive(struct dax_device *dax_dev) { lockdep_assert_held(&dax_srcu); return test_bit(DAXDEV_ALIVE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(dax_alive); /* * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring * that any fault handlers or operations that might have seen * dax_alive(), have completed. Any operations that start after * synchronize_srcu() has run will abort upon seeing !dax_alive(). 
*/ void kill_dax(struct dax_device *dax_dev) { if (!dax_dev) return; if (dax_dev->holder_data != NULL) dax_holder_notify_failure(dax_dev, 0, U64_MAX, MF_MEM_PRE_REMOVE); clear_bit(DAXDEV_ALIVE, &dax_dev->flags); synchronize_srcu(&dax_srcu); /* clear holder data */ dax_dev->holder_ops = NULL; dax_dev->holder_data = NULL; } EXPORT_SYMBOL_GPL(kill_dax); void run_dax(struct dax_device *dax_dev) { set_bit(DAXDEV_ALIVE, &dax_dev->flags); } EXPORT_SYMBOL_GPL(run_dax); static struct inode *dax_alloc_inode(struct super_block *sb) { struct dax_device *dax_dev; struct inode *inode; dax_dev = alloc_inode_sb(sb, dax_cache, GFP_KERNEL); if (!dax_dev) return NULL; inode = &dax_dev->inode; inode->i_rdev = 0; return inode; } static struct dax_device *to_dax_dev(struct inode *inode) { return container_of(inode, struct dax_device, inode); } static void dax_free_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); if (inode->i_rdev) ida_free(&dax_minor_ida, iminor(inode)); kmem_cache_free(dax_cache, dax_dev); } static void dax_destroy_inode(struct inode *inode) { struct dax_device *dax_dev = to_dax_dev(inode); WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags), "kill_dax() must be called before final iput()\n"); } static const struct super_operations dax_sops = { .statfs = simple_statfs, .alloc_inode = dax_alloc_inode, .destroy_inode = dax_destroy_inode, .free_inode = dax_free_inode, .drop_inode = generic_delete_inode, }; static int dax_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, DAXFS_MAGIC); if (!ctx) return -ENOMEM; ctx->ops = &dax_sops; return 0; } static struct file_system_type dax_fs_type = { .name = "dax", .init_fs_context = dax_init_fs_context, .kill_sb = kill_anon_super, }; static int dax_test(struct inode *inode, void *data) { dev_t devt = *(dev_t *) data; return inode->i_rdev == devt; } static int dax_set(struct inode *inode, void *data) { dev_t devt = *(dev_t *) data; inode->i_rdev = devt; return 0; } static struct dax_device *dax_dev_get(dev_t devt) { struct dax_device *dax_dev; struct inode *inode; inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31), dax_test, dax_set, &devt); if (!inode) return NULL; dax_dev = to_dax_dev(inode); if (inode->i_state & I_NEW) { set_bit(DAXDEV_ALIVE, &dax_dev->flags); inode->i_cdev = &dax_dev->cdev; inode->i_mode = S_IFCHR; inode->i_flags = S_DAX; mapping_set_gfp_mask(&inode->i_data, GFP_USER); unlock_new_inode(inode); } return dax_dev; } struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) { struct dax_device *dax_dev; dev_t devt; int minor; if (WARN_ON_ONCE(ops && !ops->zero_page_range)) return ERR_PTR(-EINVAL); minor = ida_alloc_max(&dax_minor_ida, MINORMASK, GFP_KERNEL); if (minor < 0) return ERR_PTR(-ENOMEM); devt = MKDEV(MAJOR(dax_devt), minor); dax_dev = dax_dev_get(devt); if (!dax_dev) goto err_dev; dax_dev->ops = ops; dax_dev->private = private; return dax_dev; err_dev: ida_free(&dax_minor_ida, minor); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(alloc_dax); void put_dax(struct dax_device *dax_dev) { if (!dax_dev) return; iput(&dax_dev->inode); } EXPORT_SYMBOL_GPL(put_dax); /** * dax_holder() - obtain the holder of a dax device * @dax_dev: a dax_device instance * * Return: the holder's data which represents the holder if registered, * otherwize NULL. 
*/ void *dax_holder(struct dax_device *dax_dev) { return dax_dev->holder_data; } EXPORT_SYMBOL_GPL(dax_holder); /** * inode_dax: convert a public inode into its dax_dev * @inode: An inode with i_cdev pointing to a dax_dev * * Note this is not equivalent to to_dax_dev() which is for private * internal use where we know the inode filesystem type == dax_fs_type. */ struct dax_device *inode_dax(struct inode *inode) { struct cdev *cdev = inode->i_cdev; return container_of(cdev, struct dax_device, cdev); } EXPORT_SYMBOL_GPL(inode_dax); struct inode *dax_inode(struct dax_device *dax_dev) { return &dax_dev->inode; } EXPORT_SYMBOL_GPL(dax_inode); void *dax_get_private(struct dax_device *dax_dev) { if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags)) return NULL; return dax_dev->private; } EXPORT_SYMBOL_GPL(dax_get_private); static void init_once(void *_dax_dev) { struct dax_device *dax_dev = _dax_dev; struct inode *inode = &dax_dev->inode; memset(dax_dev, 0, sizeof(*dax_dev)); inode_init_once(inode); } static int dax_fs_init(void) { int rc; dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT), init_once); if (!dax_cache) return -ENOMEM; dax_mnt = kern_mount(&dax_fs_type); if (IS_ERR(dax_mnt)) { rc = PTR_ERR(dax_mnt); goto err_mount; } dax_superblock = dax_mnt->mnt_sb; return 0; err_mount: kmem_cache_destroy(dax_cache); return rc; } static void dax_fs_exit(void) { kern_unmount(dax_mnt); rcu_barrier(); kmem_cache_destroy(dax_cache); } static int __init dax_core_init(void) { int rc; rc = dax_fs_init(); if (rc) return rc; rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); if (rc) goto err_chrdev; rc = dax_bus_init(); if (rc) goto err_bus; return 0; err_bus: unregister_chrdev_region(dax_devt, MINORMASK+1); err_chrdev: dax_fs_exit(); return 0; } static void __exit dax_core_exit(void) { dax_bus_exit(); unregister_chrdev_region(dax_devt, MINORMASK+1); ida_destroy(&dax_minor_ida); dax_fs_exit(); } MODULE_AUTHOR("Intel Corporation"); MODULE_LICENSE("GPL v2"); subsys_initcall(dax_core_init); module_exit(dax_core_exit); |
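/*
 * Illustrative sketch (not part of drivers/dax/super.c above): how a
 * hypothetical provider driver might use the interfaces defined in this file.
 * struct my_dax_dev, my_dax_direct_access() and my_dax_zero_page_range() are
 * invented for illustration; alloc_dax(), set_dax_synchronous(), kill_dax(),
 * put_dax() and dax_get_private() are the helpers shown above. Note that
 * alloc_dax() rejects an operations table without a ->zero_page_range()
 * handler, so a provider must supply one.
 */
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/string.h>

struct my_dax_dev {
	void *base;			/* kernel mapping of the memory range */
	phys_addr_t phys;		/* physical base of the range */
	long nr_pages;			/* size of the range in pages */
	struct dax_device *dax_dev;
};

static long my_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
				 long nr_pages, enum dax_access_mode mode,
				 void **kaddr, pfn_t *pfn)
{
	struct my_dax_dev *d = dax_get_private(dax_dev);

	if (kaddr)
		*kaddr = d->base + PFN_PHYS(pgoff);
	if (pfn)
		*pfn = phys_to_pfn_t(d->phys + PFN_PHYS(pgoff), PFN_DEV);
	return min(nr_pages, d->nr_pages - (long)pgoff);
}

static int my_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				  size_t nr_pages)
{
	struct my_dax_dev *d = dax_get_private(dax_dev);

	memset(d->base + PFN_PHYS(pgoff), 0, nr_pages << PAGE_SHIFT);
	return 0;
}

static const struct dax_operations my_dax_ops = {
	.direct_access = my_dax_direct_access,
	.zero_page_range = my_dax_zero_page_range,
};

static int my_dax_setup(struct my_dax_dev *d)
{
	d->dax_dev = alloc_dax(d, &my_dax_ops);
	if (IS_ERR(d->dax_dev))
		return PTR_ERR(d->dax_dev);

	/* Advertise synchronous (MAP_SYNC capable) fault support. */
	set_dax_synchronous(d->dax_dev);
	return 0;
}

static void my_dax_teardown(struct my_dax_dev *d)
{
	/* Stop new operations, wait for in-flight users, then drop the inode. */
	kill_dax(d->dax_dev);
	put_dax(d->dax_dev);
}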
2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 | // SPDX-License-Identifier: GPL-2.0 /* * * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved. * * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame? */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/kernel.h> #include "debug.h" #include "ntfs.h" #include "ntfs_fs.h" /* * You can set external NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP to manage * preallocate algorithm. */ #ifndef NTFS_MIN_LOG2_OF_CLUMP #define NTFS_MIN_LOG2_OF_CLUMP 16 #endif #ifndef NTFS_MAX_LOG2_OF_CLUMP #define NTFS_MAX_LOG2_OF_CLUMP 26 #endif // 16M #define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8)) // 16G #define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8)) static inline u64 get_pre_allocated(u64 size) { u32 clump; u8 align_shift; u64 ret; if (size <= NTFS_CLUMP_MIN) { clump = 1 << NTFS_MIN_LOG2_OF_CLUMP; align_shift = NTFS_MIN_LOG2_OF_CLUMP; } else if (size >= NTFS_CLUMP_MAX) { clump = 1 << NTFS_MAX_LOG2_OF_CLUMP; align_shift = NTFS_MAX_LOG2_OF_CLUMP; } else { align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 + __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP)); clump = 1u << align_shift; } ret = (((size + clump - 1) >> align_shift)) << align_shift; return ret; } /* * attr_load_runs - Load all runs stored in @attr. */ static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni, struct runs_tree *run, const CLST *vcn) { int err; CLST svcn = le64_to_cpu(attr->nres.svcn); CLST evcn = le64_to_cpu(attr->nres.evcn); u32 asize; u16 run_off; if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn)) return 0; if (vcn && (evcn < *vcn || *vcn < svcn)) return -EINVAL; asize = le32_to_cpu(attr->size); run_off = le16_to_cpu(attr->nres.run_off); if (run_off > asize) return -EINVAL; err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, vcn ? *vcn : svcn, Add2Ptr(attr, run_off), asize - run_off); if (err < 0) return err; return 0; } /* * run_deallocate_ex - Deallocate clusters. 
*/ static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run, CLST vcn, CLST len, CLST *done, bool trim) { int err = 0; CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0; size_t idx; if (!len) goto out; if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) { failed: run_truncate(run, vcn0); err = -EINVAL; goto out; } for (;;) { if (clen > len) clen = len; if (!clen) { err = -EINVAL; goto out; } if (lcn != SPARSE_LCN) { if (sbi) { /* mark bitmap range [lcn + clen) as free and trim clusters. */ mark_as_free_ex(sbi, lcn, clen, trim); } dn += clen; } len -= clen; if (!len) break; vcn_next = vcn + clen; if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) || vcn != vcn_next) { /* Save memory - don't load entire run. */ goto failed; } } out: if (done) *done += dn; return err; } /* * attr_allocate_clusters - Find free space, mark it as used and store in @run. */ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run, CLST vcn, CLST lcn, CLST len, CLST *pre_alloc, enum ALLOCATE_OPT opt, CLST *alen, const size_t fr, CLST *new_lcn, CLST *new_len) { int err; CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0; size_t cnt = run->count; for (;;) { err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen, opt); if (err == -ENOSPC && pre) { pre = 0; if (*pre_alloc) *pre_alloc = 0; continue; } if (err) goto out; if (vcn == vcn0) { /* Return the first fragment. */ if (new_lcn) *new_lcn = lcn; if (new_len) *new_len = flen; } /* Add new fragment into run storage. */ if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) { /* Undo last 'ntfs_look_for_free_space' */ mark_as_free_ex(sbi, lcn, len, false); err = -ENOMEM; goto out; } if (opt & ALLOCATE_ZERO) { u8 shift = sbi->cluster_bits - SECTOR_SHIFT; err = blkdev_issue_zeroout(sbi->sb->s_bdev, (sector_t)lcn << shift, (sector_t)flen << shift, GFP_NOFS, 0); if (err) goto out; } vcn += flen; if (flen >= len || (opt & ALLOCATE_MFT) || (fr && run->count - cnt >= fr)) { *alen = vcn - vcn0; return 0; } len -= flen; } out: /* Undo 'ntfs_look_for_free_space' */ if (vcn - vcn0) { run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false); run_truncate(run, vcn0); } return err; } /* * attr_make_nonresident * * If page is not NULL - it is already contains resident data * and locked (called from ni_write_frame()). */ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, struct ATTR_LIST_ENTRY *le, struct mft_inode *mi, u64 new_size, struct runs_tree *run, struct ATTRIB **ins_attr, struct page *page) { struct ntfs_sb_info *sbi; struct ATTRIB *attr_s; struct MFT_REC *rec; u32 used, asize, rsize, aoff, align; bool is_data; CLST len, alen; char *next; int err; if (attr->non_res) { *ins_attr = attr; return 0; } sbi = mi->sbi; rec = mi->mrec; attr_s = NULL; used = le32_to_cpu(rec->used); asize = le32_to_cpu(attr->size); next = Add2Ptr(attr, asize); aoff = PtrOffset(rec, attr); rsize = le32_to_cpu(attr->res.data_size); is_data = attr->type == ATTR_DATA && !attr->name_len; align = sbi->cluster_size; if (is_attr_compressed(attr)) align <<= COMPRESSION_UNIT; len = (rsize + align - 1) >> sbi->cluster_bits; run_init(run); /* Make a copy of original attribute. */ attr_s = kmemdup(attr, asize, GFP_NOFS); if (!attr_s) { err = -ENOMEM; goto out; } if (!len) { /* Empty resident -> Empty nonresident. */ alen = 0; } else { const char *data = resident_data(attr); err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL, ALLOCATE_DEF, &alen, 0, NULL, NULL); if (err) goto out1; if (!rsize) { /* Empty resident -> Non empty nonresident. 
*/ } else if (!is_data) { err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0); if (err) goto out2; } else if (!page) { char *kaddr; page = grab_cache_page(ni->vfs_inode.i_mapping, 0); if (!page) { err = -ENOMEM; goto out2; } kaddr = kmap_atomic(page); memcpy(kaddr, data, rsize); memset(kaddr + rsize, 0, PAGE_SIZE - rsize); kunmap_atomic(kaddr); flush_dcache_page(page); SetPageUptodate(page); set_page_dirty(page); unlock_page(page); put_page(page); } } /* Remove original attribute. */ used -= asize; memmove(attr, Add2Ptr(attr, asize), used - aoff); rec->used = cpu_to_le32(used); mi->dirty = true; if (le) al_remove_le(ni, le); err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s), attr_s->name_len, run, 0, alen, attr_s->flags, &attr, NULL, NULL); if (err) goto out3; kfree(attr_s); attr->nres.data_size = cpu_to_le64(rsize); attr->nres.valid_size = attr->nres.data_size; *ins_attr = attr; if (is_data) ni->ni_flags &= ~NI_FLAG_RESIDENT; /* Resident attribute becomes non resident. */ return 0; out3: attr = Add2Ptr(rec, aoff); memmove(next, attr, used - aoff); memcpy(attr, attr_s, asize); rec->used = cpu_to_le32(used + asize); mi->dirty = true; out2: /* Undo: do not trim new allocated clusters. */ run_deallocate(sbi, run, false); run_close(run); out1: kfree(attr_s); out: return err; } /* * attr_set_size_res - Helper for attr_set_size(). */ static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr, struct ATTR_LIST_ENTRY *le, struct mft_inode *mi, u64 new_size, struct runs_tree *run, struct ATTRIB **ins_attr) { struct ntfs_sb_info *sbi = mi->sbi; struct MFT_REC *rec = mi->mrec; u32 used = le32_to_cpu(rec->used); u32 asize = le32_to_cpu(attr->size); u32 aoff = PtrOffset(rec, attr); u32 rsize = le32_to_cpu(attr->res.data_size); u32 tail = used - aoff - asize; char *next = Add2Ptr(attr, asize); s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8); if (dsize < 0) { memmove(next + dsize, next, tail); } else if (dsize > 0) { if (used + dsize > sbi->max_bytes_per_attr) return attr_make_nonresident(ni, attr, le, mi, new_size, run, ins_attr, NULL); memmove(next + dsize, next, tail); memset(next, 0, dsize); } if (new_size > rsize) memset(Add2Ptr(resident_data(attr), rsize), 0, new_size - rsize); rec->used = cpu_to_le32(used + dsize); attr->size = cpu_to_le32(asize + dsize); attr->res.data_size = cpu_to_le32(new_size); mi->dirty = true; *ins_attr = attr; return 0; } /* * attr_set_size - Change the size of attribute. * * Extend: * - Sparse/compressed: No allocated clusters. * - Normal: Append allocated and preallocated new clusters. * Shrink: * - No deallocate if @keep_prealloc is set. 
*/ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, struct runs_tree *run, u64 new_size, const u64 *new_valid, bool keep_prealloc, struct ATTRIB **ret) { int err = 0; struct ntfs_sb_info *sbi = ni->mi.sbi; u8 cluster_bits = sbi->cluster_bits; bool is_mft = ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len; u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp; struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn; CLST next_svcn, pre_alloc = -1, done = 0; bool is_ext, is_bad = false; bool dirty = false; u32 align; struct MFT_REC *rec; again: alen = 0; le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL, &mi_b); if (!attr_b) { err = -ENOENT; goto bad_inode; } if (!attr_b->non_res) { err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run, &attr_b); if (err) return err; /* Return if file is still resident. */ if (!attr_b->non_res) { dirty = true; goto ok1; } /* Layout of records may be changed, so do a full search. */ goto again; } is_ext = is_attr_ext(attr_b); align = sbi->cluster_size; if (is_ext) align <<= attr_b->nres.c_unit; old_valid = le64_to_cpu(attr_b->nres.valid_size); old_size = le64_to_cpu(attr_b->nres.data_size); old_alloc = le64_to_cpu(attr_b->nres.alloc_size); again_1: old_alen = old_alloc >> cluster_bits; new_alloc = (new_size + align - 1) & ~(u64)(align - 1); new_alen = new_alloc >> cluster_bits; if (keep_prealloc && new_size < old_size) { attr_b->nres.data_size = cpu_to_le64(new_size); mi_b->dirty = dirty = true; goto ok; } vcn = old_alen - 1; svcn = le64_to_cpu(attr_b->nres.svcn); evcn = le64_to_cpu(attr_b->nres.evcn); if (svcn <= vcn && vcn <= evcn) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto bad_inode; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn, &mi); if (!attr) { err = -EINVAL; goto bad_inode; } next_le_1: svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); } /* * Here we have: * attr,mi,le - last attribute segment (containing 'vcn'). * attr_b,mi_b,le_b - base (primary) attribute segment. */ next_le: rec = mi->mrec; err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; if (new_size > old_size) { CLST to_allocate; size_t free; if (new_alloc <= old_alloc) { attr_b->nres.data_size = cpu_to_le64(new_size); mi_b->dirty = dirty = true; goto ok; } /* * Add clusters. In simple case we have to: * - allocate space (vcn, lcn, len) * - update packed run in 'mi' * - update attr->nres.evcn * - update attr_b->nres.data_size/attr_b->nres.alloc_size */ to_allocate = new_alen - old_alen; add_alloc_in_same_attr_seg: lcn = 0; if (is_mft) { /* MFT allocates clusters from MFT zone. */ pre_alloc = 0; } else if (is_ext) { /* No preallocate for sparse/compress. */ pre_alloc = 0; } else if (pre_alloc == -1) { pre_alloc = 0; if (type == ATTR_DATA && !name_len && sbi->options->prealloc) { pre_alloc = bytes_to_cluster( sbi, get_pre_allocated( new_size)) - new_alen; } /* Get the last LCN to allocate from. 
*/ if (old_alen && !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) { lcn = SPARSE_LCN; } if (lcn == SPARSE_LCN) lcn = 0; else if (lcn) lcn += 1; free = wnd_zeroes(&sbi->used.bitmap); if (to_allocate > free) { err = -ENOSPC; goto out; } if (pre_alloc && to_allocate + pre_alloc > free) pre_alloc = 0; } vcn = old_alen; if (is_ext) { if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate, false)) { err = -ENOMEM; goto out; } alen = to_allocate; } else { /* ~3 bytes per fragment. */ err = attr_allocate_clusters( sbi, run, vcn, lcn, to_allocate, &pre_alloc, is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen, is_mft ? 0 : (sbi->record_size - le32_to_cpu(rec->used) + 8) / 3 + 1, NULL, NULL); if (err) goto out; } done += alen; vcn += alen; if (to_allocate > alen) to_allocate -= alen; else to_allocate = 0; pack_runs: err = mi_pack_runs(mi, attr, run, vcn - svcn); if (err) goto undo_1; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; new_alloc_tmp = (u64)next_svcn << cluster_bits; attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); mi_b->dirty = dirty = true; if (next_svcn >= vcn && !to_allocate) { /* Normal way. Update attribute and exit. */ attr_b->nres.data_size = cpu_to_le64(new_size); goto ok; } /* At least two MFT to avoid recursive loop. */ if (is_mft && next_svcn == vcn && ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) { new_size = new_alloc_tmp; attr_b->nres.data_size = attr_b->nres.alloc_size; goto ok; } if (le32_to_cpu(rec->used) < sbi->record_size) { old_alen = next_svcn; evcn = old_alen - 1; goto add_alloc_in_same_attr_seg; } attr_b->nres.data_size = attr_b->nres.alloc_size; if (new_alloc_tmp < old_valid) attr_b->nres.valid_size = attr_b->nres.data_size; if (type == ATTR_LIST) { err = ni_expand_list(ni); if (err) goto undo_2; if (next_svcn < vcn) goto pack_runs; /* Layout of records is changed. */ goto again; } if (!ni->attr_list.size) { err = ni_create_attr_list(ni); /* In case of error layout of records is not changed. */ if (err) goto undo_2; /* Layout of records is changed. */ } if (next_svcn >= vcn) { /* This is MFT data, repeat. */ goto again; } /* Insert new attribute segment. */ err = ni_insert_nonresident(ni, type, name, name_len, run, next_svcn, vcn - next_svcn, attr_b->flags, &attr, &mi, NULL); /* * Layout of records maybe changed. * Find base attribute to update. */ le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL, &mi_b); if (!attr_b) { err = -EINVAL; goto bad_inode; } if (err) { /* ni_insert_nonresident failed. */ attr = NULL; goto undo_2; } if (!is_mft) run_truncate_head(run, evcn + 1); svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); /* * Attribute is in consistency state. * Save this point to restore to if next steps fail. */ old_valid = old_size = old_alloc = (u64)vcn << cluster_bits; attr_b->nres.valid_size = attr_b->nres.data_size = attr_b->nres.alloc_size = cpu_to_le64(old_size); mi_b->dirty = dirty = true; goto again_1; } if (new_size != old_size || (new_alloc != old_alloc && !keep_prealloc)) { /* * Truncate clusters. In simple case we have to: * - update packed run in 'mi' * - update attr->nres.evcn * - update attr_b->nres.data_size/attr_b->nres.alloc_size * - mark and trim clusters as free (vcn, lcn, len) */ CLST dlen = 0; vcn = max(svcn, new_alen); new_alloc_tmp = (u64)vcn << cluster_bits; if (vcn > svcn) { err = mi_pack_runs(mi, attr, run, vcn - svcn); if (err) goto out; } else if (le && le->vcn) { u16 le_sz = le16_to_cpu(le->size); /* * NOTE: List entries for one attribute are always * the same size. 
We deal with last entry (vcn==0) * and it is not first in entries array * (list entry for std attribute always first). * So it is safe to step back. */ mi_remove_attr(NULL, mi, attr); if (!al_remove_le(ni, le)) { err = -EINVAL; goto bad_inode; } le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); } else { attr->nres.evcn = cpu_to_le64((u64)vcn - 1); mi->dirty = true; } attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp); if (vcn == new_alen) { attr_b->nres.data_size = cpu_to_le64(new_size); if (new_size < old_valid) attr_b->nres.valid_size = attr_b->nres.data_size; } else { if (new_alloc_tmp <= le64_to_cpu(attr_b->nres.data_size)) attr_b->nres.data_size = attr_b->nres.alloc_size; if (new_alloc_tmp < le64_to_cpu(attr_b->nres.valid_size)) attr_b->nres.valid_size = attr_b->nres.alloc_size; } mi_b->dirty = dirty = true; err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen, true); if (err) goto out; if (is_ext) { /* dlen - really deallocated clusters. */ le64_sub_cpu(&attr_b->nres.total_size, ((u64)dlen << cluster_bits)); } run_truncate(run, vcn); if (new_alloc_tmp <= new_alloc) goto ok; old_size = new_alloc_tmp; vcn = svcn - 1; if (le == le_b) { attr = attr_b; mi = mi_b; evcn = svcn - 1; svcn = 0; goto next_le; } if (le->type != type || le->name_len != name_len || memcmp(le_name(le), name, name_len * sizeof(short))) { err = -EINVAL; goto bad_inode; } err = ni_load_mi(ni, le, &mi); if (err) goto out; attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id); if (!attr) { err = -EINVAL; goto bad_inode; } goto next_le_1; } ok: if (new_valid) { __le64 valid = cpu_to_le64(min(*new_valid, new_size)); if (attr_b->nres.valid_size != valid) { attr_b->nres.valid_size = valid; mi_b->dirty = true; } } ok1: if (ret) *ret = attr_b; if (((type == ATTR_DATA && !name_len) || (type == ATTR_ALLOC && name == I30_NAME))) { /* Update inode_set_bytes. */ if (attr_b->non_res) { new_alloc = le64_to_cpu(attr_b->nres.alloc_size); if (inode_get_bytes(&ni->vfs_inode) != new_alloc) { inode_set_bytes(&ni->vfs_inode, new_alloc); dirty = true; } } /* Don't forget to update duplicate information in parent. */ if (dirty) { ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mark_inode_dirty(&ni->vfs_inode); } } return 0; undo_2: vcn -= alen; attr_b->nres.data_size = cpu_to_le64(old_size); attr_b->nres.valid_size = cpu_to_le64(old_valid); attr_b->nres.alloc_size = cpu_to_le64(old_alloc); /* Restore 'attr' and 'mi'. */ if (attr) goto restore_run; if (le64_to_cpu(attr_b->nres.svcn) <= svcn && svcn <= le64_to_cpu(attr_b->nres.evcn)) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto bad_inode; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &svcn, &mi); if (!attr) goto bad_inode; } restore_run: if (mi_pack_runs(mi, attr, run, evcn - svcn + 1)) is_bad = true; undo_1: run_deallocate_ex(sbi, run, vcn, alen, NULL, false); run_truncate(run, vcn); out: if (is_bad) { bad_inode: _ntfs_bad_inode(&ni->vfs_inode); } return err; } /* * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'. * * @new == NULL means just to get current mapping for 'vcn' * @new != NULL means allocate real cluster if 'vcn' maps to hole * @zero - zeroout new allocated clusters * * NOTE: * - @new != NULL is called only for sparsed or compressed attributes. * - new allocated clusters are zeroed via blkdev_issue_zeroout. 
*/ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, CLST *len, bool *new, bool zero) { int err = 0; struct runs_tree *run = &ni->file.run; struct ntfs_sb_info *sbi; u8 cluster_bits; struct ATTRIB *attr, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen; CLST alloc, evcn; unsigned fr; u64 total_size, total_size0; int step = 0; if (new) *new = false; /* Try to find in cache. */ down_read(&ni->file.run_lock); if (!run_lookup_entry(run, vcn, lcn, len, NULL)) *len = 0; up_read(&ni->file.run_lock); if (*len && (*lcn != SPARSE_LCN || !new)) return 0; /* Fast normal way without allocation. */ /* No cluster in cache or we need to allocate cluster in hole. */ sbi = ni->mi.sbi; cluster_bits = sbi->cluster_bits; ni_lock(ni); down_write(&ni->file.run_lock); /* Repeat the code above (under write lock). */ if (!run_lookup_entry(run, vcn, lcn, len, NULL)) *len = 0; if (*len) { if (*lcn != SPARSE_LCN || !new) goto out; /* normal way without allocation. */ if (clen > *len) clen = *len; } le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -ENOENT; goto out; } if (!attr_b->non_res) { *lcn = RESIDENT_LCN; *len = 1; goto out; } asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits; if (vcn >= asize) { if (new) { err = -EINVAL; } else { *len = 1; *lcn = SPARSE_LCN; } goto out; } svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; attr = attr_b; le = le_b; mi = mi_b; if (le_b && (vcn < svcn || evcn1 <= vcn)) { attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } /* Load in cache actual information. */ err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; if (!*len) { if (run_lookup_entry(run, vcn, lcn, len, NULL)) { if (*lcn != SPARSE_LCN || !new) goto ok; /* Slow normal way without allocation. */ if (clen > *len) clen = *len; } else if (!new) { /* Here we may return -ENOENT. * In any case caller gets zero length. */ goto ok; } } if (!is_attr_ext(attr_b)) { /* The code below only for sparsed or compressed attributes. */ err = -EINVAL; goto out; } vcn0 = vcn; to_alloc = clen; fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1; /* Allocate frame aligned clusters. * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed. * ntfs3 uses 1 cluster per frame for new created sparsed files. */ if (attr_b->nres.c_unit) { CLST clst_per_frame = 1u << attr_b->nres.c_unit; CLST cmask = ~(clst_per_frame - 1); /* Get frame aligned vcn and to_alloc. */ vcn = vcn0 & cmask; to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn; if (fr < clst_per_frame) fr = clst_per_frame; zero = true; /* Check if 'vcn' and 'vcn0' in different attribute segments. */ if (vcn < svcn || evcn1 <= vcn) { /* Load attribute for truncated vcn. */ attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; } } if (vcn + to_alloc > asize) to_alloc = asize - vcn; /* Get the last LCN to allocate from. 
*/ hint = 0; if (vcn > evcn1) { if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1, false)) { err = -ENOMEM; goto out; } } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) { hint = -1; } /* Allocate and zeroout new clusters. */ err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL, zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen, fr, lcn, len); if (err) goto out; *new = true; step = 1; end = vcn + alen; /* Save 'total_size0' to restore if error. */ total_size0 = le64_to_cpu(attr_b->nres.total_size); total_size = total_size0 + ((u64)alen << cluster_bits); if (vcn != vcn0) { if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) { err = -EINVAL; goto out; } if (*lcn == SPARSE_LCN) { /* Internal error. Should not happened. */ WARN_ON(1); err = -EINVAL; goto out; } /* Check case when vcn0 + len overlaps new allocated clusters. */ if (vcn0 + *len > end) *len = end - vcn0; } repack: err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); if (err) goto out; attr_b->nres.total_size = cpu_to_le64(total_size); inode_set_bytes(&ni->vfs_inode, total_size); ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mi_b->dirty = true; mark_inode_dirty(&ni->vfs_inode); /* Stored [vcn : next_svcn) from [vcn : end). */ next_svcn = le64_to_cpu(attr->nres.evcn) + 1; if (end <= evcn1) { if (next_svcn == evcn1) { /* Normal way. Update attribute and exit. */ goto ok; } /* Add new segment [next_svcn : evcn1 - next_svcn). */ if (!ni->attr_list.size) { err = ni_create_attr_list(ni); if (err) goto undo1; /* Layout of records is changed. */ le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -ENOENT; goto out; } attr = attr_b; le = le_b; mi = mi_b; goto repack; } } /* * The code below may require additional cluster (to extend attribute list) * and / or one MFT record * It is too complex to undo operations if -ENOSPC occurs deep inside * in 'ni_insert_nonresident'. * Return in advance -ENOSPC here if there are no free cluster and no free MFT. */ if (!ntfs_check_for_free_space(sbi, 1, 1)) { /* Undo step 1. */ err = -ENOSPC; goto undo1; } step = 2; svcn = evcn1; /* Estimate next attribute. */ attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi); if (!attr) { /* Insert new attribute segment. */ goto ins_ext; } /* Try to update existed attribute segment. */ alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size)); evcn = le64_to_cpu(attr->nres.evcn); if (end < next_svcn) end = next_svcn; while (end > evcn) { /* Remove segment [svcn : evcn). */ mi_remove_attr(NULL, mi, attr); if (!al_remove_le(ni, le)) { err = -EINVAL; goto out; } if (evcn + 1 >= alloc) { /* Last attribute segment. 
*/ evcn1 = evcn + 1; goto ins_ext; } if (ni_load_mi(ni, le, &mi)) { attr = NULL; goto out; } attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); } if (end < svcn) end = svcn; err = attr_load_runs(attr, ni, run, &end); if (err) goto out; evcn1 = evcn + 1; attr->nres.svcn = cpu_to_le64(next_svcn); err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); if (err) goto out; le->vcn = cpu_to_le64(next_svcn); ni->attr_list.dirty = true; mi->dirty = true; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; ins_ext: if (evcn1 > next_svcn) { err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, next_svcn, evcn1 - next_svcn, attr_b->flags, &attr, &mi, NULL); if (err) goto out; } ok: run_truncate_around(run, vcn); out: if (err && step > 1) { /* Too complex to restore. */ _ntfs_bad_inode(&ni->vfs_inode); } up_write(&ni->file.run_lock); ni_unlock(ni); return err; undo1: /* Undo step1. */ attr_b->nres.total_size = cpu_to_le64(total_size0); inode_set_bytes(&ni->vfs_inode, total_size0); if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) || !run_add_entry(run, vcn, SPARSE_LCN, alen, false) || mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) { _ntfs_bad_inode(&ni->vfs_inode); } goto out; } int attr_data_read_resident(struct ntfs_inode *ni, struct page *page) { u64 vbo; struct ATTRIB *attr; u32 data_size; attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL); if (!attr) return -EINVAL; if (attr->non_res) return E_NTFS_NONRESIDENT; vbo = page->index << PAGE_SHIFT; data_size = le32_to_cpu(attr->res.data_size); if (vbo < data_size) { const char *data = resident_data(attr); char *kaddr = kmap_atomic(page); u32 use = data_size - vbo; if (use > PAGE_SIZE) use = PAGE_SIZE; memcpy(kaddr, data + vbo, use); memset(kaddr + use, 0, PAGE_SIZE - use); kunmap_atomic(kaddr); flush_dcache_page(page); SetPageUptodate(page); } else if (!PageUptodate(page)) { zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); } return 0; } int attr_data_write_resident(struct ntfs_inode *ni, struct page *page) { u64 vbo; struct mft_inode *mi; struct ATTRIB *attr; u32 data_size; attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) return -EINVAL; if (attr->non_res) { /* Return special error code to check this case. */ return E_NTFS_NONRESIDENT; } vbo = page->index << PAGE_SHIFT; data_size = le32_to_cpu(attr->res.data_size); if (vbo < data_size) { char *data = resident_data(attr); char *kaddr = kmap_atomic(page); u32 use = data_size - vbo; if (use > PAGE_SIZE) use = PAGE_SIZE; memcpy(data + vbo, kaddr, use); kunmap_atomic(kaddr); mi->dirty = true; } ni->i_valid = data_size; return 0; } /* * attr_load_runs_vcn - Load runs with VCN. */ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, struct runs_tree *run, CLST vcn) { struct ATTRIB *attr; int err; CLST svcn, evcn; u16 ro; if (!ni) { /* Is record corrupted? */ return -ENOENT; } attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL); if (!attr) { /* Is record corrupted? */ return -ENOENT; } svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); if (evcn < vcn || vcn < svcn) { /* Is record corrupted? 
*/ return -EINVAL; } ro = le16_to_cpu(attr->nres.run_off); if (ro > le32_to_cpu(attr->size)) return -EINVAL; err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro); if (err < 0) return err; return 0; } /* * attr_load_runs_range - Load runs for given range [from to). */ int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, struct runs_tree *run, u64 from, u64 to) { struct ntfs_sb_info *sbi = ni->mi.sbi; u8 cluster_bits = sbi->cluster_bits; CLST vcn; CLST vcn_last = (to - 1) >> cluster_bits; CLST lcn, clen; int err; for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) { if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) { err = attr_load_runs_vcn(ni, type, name, name_len, run, vcn); if (err) return err; clen = 0; /* Next run_lookup_entry(vcn) must be success. */ } } return 0; } #ifdef CONFIG_NTFS3_LZX_XPRESS /* * attr_wof_frame_info * * Read header of Xpress/LZX file to get info about frame. */ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr, struct runs_tree *run, u64 frame, u64 frames, u8 frame_bits, u32 *ondisk_size, u64 *vbo_data) { struct ntfs_sb_info *sbi = ni->mi.sbi; u64 vbo[2], off[2], wof_size; u32 voff; u8 bytes_per_off; char *addr; struct page *page; int i, err; __le32 *off32; __le64 *off64; if (ni->vfs_inode.i_size < 0x100000000ull) { /* File starts with array of 32 bit offsets. */ bytes_per_off = sizeof(__le32); vbo[1] = frame << 2; *vbo_data = frames << 2; } else { /* File starts with array of 64 bit offsets. */ bytes_per_off = sizeof(__le64); vbo[1] = frame << 3; *vbo_data = frames << 3; } /* * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts. * Read 4/8 bytes at [vbo] == offset where compressed frame ends. */ if (!attr->non_res) { if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) { ntfs_inode_err(&ni->vfs_inode, "is corrupted"); return -EINVAL; } addr = resident_data(attr); if (bytes_per_off == sizeof(__le32)) { off32 = Add2Ptr(addr, vbo[1]); off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0; off[1] = le32_to_cpu(off32[0]); } else { off64 = Add2Ptr(addr, vbo[1]); off[0] = vbo[1] ? 
le64_to_cpu(off64[-1]) : 0; off[1] = le64_to_cpu(off64[0]); } *vbo_data += off[0]; *ondisk_size = off[1] - off[0]; return 0; } wof_size = le64_to_cpu(attr->nres.data_size); down_write(&ni->file.run_lock); page = ni->file.offs_page; if (!page) { page = alloc_page(GFP_KERNEL); if (!page) { err = -ENOMEM; goto out; } page->index = -1; ni->file.offs_page = page; } lock_page(page); addr = page_address(page); if (vbo[1]) { voff = vbo[1] & (PAGE_SIZE - 1); vbo[0] = vbo[1] - bytes_per_off; i = 0; } else { voff = 0; vbo[0] = 0; off[0] = 0; i = 1; } do { pgoff_t index = vbo[i] >> PAGE_SHIFT; if (index != page->index) { u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1); u64 to = min(from + PAGE_SIZE, wof_size); err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME), run, from, to); if (err) goto out1; err = ntfs_bio_pages(sbi, run, &page, 1, from, to - from, REQ_OP_READ); if (err) { page->index = -1; goto out1; } page->index = index; } if (i) { if (bytes_per_off == sizeof(__le32)) { off32 = Add2Ptr(addr, voff); off[1] = le32_to_cpu(*off32); } else { off64 = Add2Ptr(addr, voff); off[1] = le64_to_cpu(*off64); } } else if (!voff) { if (bytes_per_off == sizeof(__le32)) { off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32)); off[0] = le32_to_cpu(*off32); } else { off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64)); off[0] = le64_to_cpu(*off64); } } else { /* Two values in one page. */ if (bytes_per_off == sizeof(__le32)) { off32 = Add2Ptr(addr, voff); off[0] = le32_to_cpu(off32[-1]); off[1] = le32_to_cpu(off32[0]); } else { off64 = Add2Ptr(addr, voff); off[0] = le64_to_cpu(off64[-1]); off[1] = le64_to_cpu(off64[0]); } break; } } while (++i < 2); *vbo_data += off[0]; *ondisk_size = off[1] - off[0]; out1: unlock_page(page); out: up_write(&ni->file.run_lock); return err; } #endif /* * attr_is_frame_compressed - Used to detect compressed frame. */ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr, CLST frame, CLST *clst_data) { int err; u32 clst_frame; CLST clen, lcn, vcn, alen, slen, vcn_next; size_t idx; struct runs_tree *run; *clst_data = 0; if (!is_attr_compressed(attr)) return 0; if (!attr->non_res) return 0; clst_frame = 1u << attr->nres.c_unit; vcn = frame * clst_frame; run = &ni->file.run; if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) { err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), attr->name_len, run, vcn); if (err) return err; if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) return -EINVAL; } if (lcn == SPARSE_LCN) { /* Sparsed frame. */ return 0; } if (clen >= clst_frame) { /* * The frame is not compressed 'cause * it does not contain any sparse clusters. */ *clst_data = clst_frame; return 0; } alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size)); slen = 0; *clst_data = clen; /* * The frame is compressed if *clst_data + slen >= clst_frame. * Check next fragments. */ while ((vcn += clen) < alen) { vcn_next = vcn; if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) || vcn_next != vcn) { err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), attr->name_len, run, vcn_next); if (err) return err; vcn = vcn_next; if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) return -EINVAL; } if (lcn == SPARSE_LCN) { slen += clen; } else { if (slen) { /* * Data_clusters + sparse_clusters = * not enough for frame. */ return -EINVAL; } *clst_data += clen; } if (*clst_data + slen >= clst_frame) { if (!slen) { /* * There is no sparsed clusters in this frame * so it is not compressed. */ *clst_data = clst_frame; } else { /* Frame is compressed. 
*/ } break; } } return 0; } /* * attr_allocate_frame - Allocate/free clusters for @frame. * * Assumed: down_write(&ni->file.run_lock); */ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, u64 new_valid) { int err = 0; struct runs_tree *run = &ni->file.run; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST svcn, evcn1, next_svcn, len; CLST vcn, end, clst_data; u64 total_size, valid_size, data_size; le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) return -ENOENT; if (!is_attr_ext(attr_b)) return -EINVAL; vcn = frame << NTFS_LZNT_CUNIT; total_size = le64_to_cpu(attr_b->nres.total_size); svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; data_size = le64_to_cpu(attr_b->nres.data_size); if (svcn <= vcn && vcn < evcn1) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto out; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data); if (err) goto out; total_size -= (u64)clst_data << sbi->cluster_bits; len = bytes_to_cluster(sbi, compr_size); if (len == clst_data) goto out; if (len < clst_data) { err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len, NULL, true); if (err) goto out; if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len, false)) { err = -ENOMEM; goto out; } end = vcn + clst_data; /* Run contains updated range [vcn + len : end). */ } else { CLST alen, hint = 0; /* Get the last LCN to allocate from. */ if (vcn + clst_data && !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL, NULL)) { hint = -1; } err = attr_allocate_clusters(sbi, run, vcn + clst_data, hint + 1, len - clst_data, NULL, ALLOCATE_DEF, &alen, 0, NULL, NULL); if (err) goto out; end = vcn + len; /* Run contains updated range [vcn + clst_data : end). */ } total_size += (u64)len << sbi->cluster_bits; repack: err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); if (err) goto out; attr_b->nres.total_size = cpu_to_le64(total_size); inode_set_bytes(&ni->vfs_inode, total_size); mi_b->dirty = true; mark_inode_dirty(&ni->vfs_inode); /* Stored [vcn : next_svcn) from [vcn : end). */ next_svcn = le64_to_cpu(attr->nres.evcn) + 1; if (end <= evcn1) { if (next_svcn == evcn1) { /* Normal way. Update attribute and exit. */ goto ok; } /* Add new segment [next_svcn : evcn1 - next_svcn). */ if (!ni->attr_list.size) { err = ni_create_attr_list(ni); if (err) goto out; /* Layout of records is changed. */ le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -ENOENT; goto out; } attr = attr_b; le = le_b; mi = mi_b; goto repack; } } svcn = evcn1; /* Estimate next attribute. */ attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi); if (attr) { CLST alloc = bytes_to_cluster( sbi, le64_to_cpu(attr_b->nres.alloc_size)); CLST evcn = le64_to_cpu(attr->nres.evcn); if (end < next_svcn) end = next_svcn; while (end > evcn) { /* Remove segment [svcn : evcn). */ mi_remove_attr(NULL, mi, attr); if (!al_remove_le(ni, le)) { err = -EINVAL; goto out; } if (evcn + 1 >= alloc) { /* Last attribute segment. 
*/ evcn1 = evcn + 1; goto ins_ext; } if (ni_load_mi(ni, le, &mi)) { attr = NULL; goto out; } attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); } if (end < svcn) end = svcn; err = attr_load_runs(attr, ni, run, &end); if (err) goto out; evcn1 = evcn + 1; attr->nres.svcn = cpu_to_le64(next_svcn); err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn); if (err) goto out; le->vcn = cpu_to_le64(next_svcn); ni->attr_list.dirty = true; mi->dirty = true; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; } ins_ext: if (evcn1 > next_svcn) { err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, next_svcn, evcn1 - next_svcn, attr_b->flags, &attr, &mi, NULL); if (err) goto out; } ok: run_truncate_around(run, vcn); out: if (attr_b) { if (new_valid > data_size) new_valid = data_size; valid_size = le64_to_cpu(attr_b->nres.valid_size); if (new_valid != valid_size) { attr_b->nres.valid_size = cpu_to_le64(valid_size); mi_b->dirty = true; } } return err; } /* * attr_collapse_range - Collapse range in file. */ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes) { int err = 0; struct runs_tree *run = &ni->file.run; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST svcn, evcn1, len, dealloc, alen; CLST vcn, end; u64 valid_size, data_size, alloc_size, total_size; u32 mask; __le16 a_flags; if (!bytes) return 0; le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) return -ENOENT; if (!attr_b->non_res) { /* Attribute is resident. Nothing to do? */ return 0; } data_size = le64_to_cpu(attr_b->nres.data_size); alloc_size = le64_to_cpu(attr_b->nres.alloc_size); a_flags = attr_b->flags; if (is_attr_ext(attr_b)) { total_size = le64_to_cpu(attr_b->nres.total_size); mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; } else { total_size = alloc_size; mask = sbi->cluster_mask; } if ((vbo & mask) || (bytes & mask)) { /* Allow to collapse only cluster aligned ranges. */ return -EINVAL; } if (vbo > data_size) return -EINVAL; down_write(&ni->file.run_lock); if (vbo + bytes >= data_size) { u64 new_valid = min(ni->i_valid, vbo); /* Simple truncate file at 'vbo'. */ truncate_setsize(&ni->vfs_inode, vbo); err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo, &new_valid, true, NULL); if (!err && new_valid < ni->i_valid) ni->i_valid = new_valid; goto out; } /* * Enumerate all attribute segments and collapse. */ alen = alloc_size >> sbi->cluster_bits; vcn = vbo >> sbi->cluster_bits; len = bytes >> sbi->cluster_bits; end = vcn + len; dealloc = 0; svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; if (svcn <= vcn && vcn < evcn1) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto out; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto out; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } for (;;) { if (svcn >= end) { /* Shift VCN- */ attr->nres.svcn = cpu_to_le64(svcn - len); attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len); if (le) { le->vcn = attr->nres.svcn; ni->attr_list.dirty = true; } mi->dirty = true; } else if (svcn < vcn || end < evcn1) { CLST vcn1, eat, next_svcn; /* Collapse a part of this attribute segment. 
*/ err = attr_load_runs(attr, ni, run, &svcn); if (err) goto out; vcn1 = max(vcn, svcn); eat = min(end, evcn1) - vcn1; err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc, true); if (err) goto out; if (!run_collapse_range(run, vcn1, eat)) { err = -ENOMEM; goto out; } if (svcn >= vcn) { /* Shift VCN */ attr->nres.svcn = cpu_to_le64(vcn); if (le) { le->vcn = attr->nres.svcn; ni->attr_list.dirty = true; } } err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat); if (err) goto out; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; if (next_svcn + eat < evcn1) { err = ni_insert_nonresident( ni, ATTR_DATA, NULL, 0, run, next_svcn, evcn1 - eat - next_svcn, a_flags, &attr, &mi, &le); if (err) goto out; /* Layout of records maybe changed. */ attr_b = NULL; } /* Free all allocated memory. */ run_truncate(run, 0); } else { u16 le_sz; u16 roff = le16_to_cpu(attr->nres.run_off); if (roff > le32_to_cpu(attr->size)) { err = -EINVAL; goto out; } run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn1 - 1, svcn, Add2Ptr(attr, roff), le32_to_cpu(attr->size) - roff); /* Delete this attribute segment. */ mi_remove_attr(NULL, mi, attr); if (!le) break; le_sz = le16_to_cpu(le->size); if (!al_remove_le(ni, le)) { err = -EINVAL; goto out; } if (evcn1 >= alen) break; if (!svcn) { /* Load next record that contains this attribute. */ if (ni_load_mi(ni, le, &mi)) { err = -EINVAL; goto out; } /* Look for required attribute. */ attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id); if (!attr) { err = -EINVAL; goto out; } goto next_attr; } le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz); } if (evcn1 >= alen) break; attr = ni_enum_attr_ex(ni, attr, &le, &mi); if (!attr) { err = -EINVAL; goto out; } next_attr: svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } if (!attr_b) { le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -ENOENT; goto out; } } data_size -= bytes; valid_size = ni->i_valid; if (vbo + bytes <= valid_size) valid_size -= bytes; else if (vbo < valid_size) valid_size = vbo; attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes); attr_b->nres.data_size = cpu_to_le64(data_size); attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size)); total_size -= (u64)dealloc << sbi->cluster_bits; if (is_attr_ext(attr_b)) attr_b->nres.total_size = cpu_to_le64(total_size); mi_b->dirty = true; /* Update inode size. */ ni->i_valid = valid_size; i_size_write(&ni->vfs_inode, data_size); inode_set_bytes(&ni->vfs_inode, total_size); ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mark_inode_dirty(&ni->vfs_inode); out: up_write(&ni->file.run_lock); if (err) _ntfs_bad_inode(&ni->vfs_inode); return err; } /* * attr_punch_hole * * Not for normal files. 
*/ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size) { int err = 0; struct runs_tree *run = &ni->file.run; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn; u64 total_size, alloc_size; u32 mask; __le16 a_flags; struct runs_tree run2; if (!bytes) return 0; le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) return -ENOENT; if (!attr_b->non_res) { u32 data_size = le32_to_cpu(attr_b->res.data_size); u32 from, to; if (vbo > data_size) return 0; from = vbo; to = min_t(u64, vbo + bytes, data_size); memset(Add2Ptr(resident_data(attr_b), from), 0, to - from); return 0; } if (!is_attr_ext(attr_b)) return -EOPNOTSUPP; alloc_size = le64_to_cpu(attr_b->nres.alloc_size); total_size = le64_to_cpu(attr_b->nres.total_size); if (vbo >= alloc_size) { /* NOTE: It is allowed. */ return 0; } mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; bytes += vbo; if (bytes > alloc_size) bytes = alloc_size; bytes -= vbo; if ((vbo & mask) || (bytes & mask)) { /* We have to zero a range(s). */ if (frame_size == NULL) { /* Caller insists range is aligned. */ return -EINVAL; } *frame_size = mask + 1; return E_NTFS_NOTALIGNED; } down_write(&ni->file.run_lock); run_init(&run2); run_truncate(run, 0); /* * Enumerate all attribute segments and punch hole where necessary. */ alen = alloc_size >> sbi->cluster_bits; vcn = vbo >> sbi->cluster_bits; len = bytes >> sbi->cluster_bits; end = vcn + len; hole = 0; svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; a_flags = attr_b->flags; if (svcn <= vcn && vcn < evcn1) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto bad_inode; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto bad_inode; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } while (svcn < end) { CLST vcn1, zero, hole2 = hole; err = attr_load_runs(attr, ni, run, &svcn); if (err) goto done; vcn1 = max(vcn, svcn); zero = min(end, evcn1) - vcn1; /* * Check range [vcn1 + zero). * Calculate how many clusters there are. * Don't do any destructive actions. */ err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false); if (err) goto done; /* Check if required range is already hole. */ if (hole2 == hole) goto next_attr; /* Make a clone of run to undo. */ err = run_clone(run, &run2); if (err) goto done; /* Make a hole range (sparse) [vcn1 + zero). */ if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) { err = -ENOMEM; goto done; } /* Update run in attribute segment. */ err = mi_pack_runs(mi, attr, run, evcn1 - svcn); if (err) goto done; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; if (next_svcn < evcn1) { /* Insert new attribute segment. */ err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, next_svcn, evcn1 - next_svcn, a_flags, &attr, &mi, &le); if (err) goto undo_punch; /* Layout of records maybe changed. */ attr_b = NULL; } /* Real deallocate. Should not fail. */ run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true); next_attr: /* Free all allocated memory. */ run_truncate(run, 0); if (evcn1 >= alen) break; /* Get next attribute segment. 
*/ attr = ni_enum_attr_ex(ni, attr, &le, &mi); if (!attr) { err = -EINVAL; goto bad_inode; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } done: if (!hole) goto out; if (!attr_b) { attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -EINVAL; goto bad_inode; } } total_size -= (u64)hole << sbi->cluster_bits; attr_b->nres.total_size = cpu_to_le64(total_size); mi_b->dirty = true; /* Update inode size. */ inode_set_bytes(&ni->vfs_inode, total_size); ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mark_inode_dirty(&ni->vfs_inode); out: run_close(&run2); up_write(&ni->file.run_lock); return err; bad_inode: _ntfs_bad_inode(&ni->vfs_inode); goto out; undo_punch: /* * Restore packed runs. * 'mi_pack_runs' should not fail, cause we restore original. */ if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn)) goto bad_inode; goto done; } /* * attr_insert_range - Insert range (hole) in file. * Not for normal files. */ int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes) { int err = 0; struct runs_tree *run = &ni->file.run; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; CLST vcn, svcn, evcn1, len, next_svcn; u64 data_size, alloc_size; u32 mask; __le16 a_flags; if (!bytes) return 0; le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) return -ENOENT; if (!is_attr_ext(attr_b)) { /* It was checked above. See fallocate. */ return -EOPNOTSUPP; } if (!attr_b->non_res) { data_size = le32_to_cpu(attr_b->res.data_size); alloc_size = data_size; mask = sbi->cluster_mask; /* cluster_size - 1 */ } else { data_size = le64_to_cpu(attr_b->nres.data_size); alloc_size = le64_to_cpu(attr_b->nres.alloc_size); mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1; } if (vbo > data_size) { /* Insert range after the file size is not allowed. */ return -EINVAL; } if ((vbo & mask) || (bytes & mask)) { /* Allow to insert only frame aligned ranges. */ return -EINVAL; } /* * valid_size <= data_size <= alloc_size * Check alloc_size for maximum possible. */ if (bytes > sbi->maxbytes_sparse - alloc_size) return -EFBIG; vcn = vbo >> sbi->cluster_bits; len = bytes >> sbi->cluster_bits; down_write(&ni->file.run_lock); if (!attr_b->non_res) { err = attr_set_size(ni, ATTR_DATA, NULL, 0, run, data_size + bytes, NULL, false, NULL); le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -EINVAL; goto bad_inode; } if (err) goto out; if (!attr_b->non_res) { /* Still resident. */ char *data = Add2Ptr(attr_b, le16_to_cpu(attr_b->res.data_off)); memmove(data + bytes, data, bytes); memset(data, 0, bytes); goto done; } /* Resident files becomes nonresident. */ data_size = le64_to_cpu(attr_b->nres.data_size); alloc_size = le64_to_cpu(attr_b->nres.alloc_size); } /* * Enumerate all attribute segments and shift start vcn. */ a_flags = attr_b->flags; svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; if (svcn <= vcn && vcn < evcn1) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { err = -EINVAL; goto bad_inode; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { err = -EINVAL; goto bad_inode; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } run_truncate(run, 0); /* clear cached values. 
*/ err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; if (!run_insert_range(run, vcn, len)) { err = -ENOMEM; goto out; } /* Try to pack in current record as much as possible. */ err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn); if (err) goto out; next_svcn = le64_to_cpu(attr->nres.evcn) + 1; while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) && attr->type == ATTR_DATA && !attr->name_len) { le64_add_cpu(&attr->nres.svcn, len); le64_add_cpu(&attr->nres.evcn, len); if (le) { le->vcn = attr->nres.svcn; ni->attr_list.dirty = true; } mi->dirty = true; } if (next_svcn < evcn1 + len) { err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run, next_svcn, evcn1 + len - next_svcn, a_flags, NULL, NULL, NULL); le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); if (!attr_b) { err = -EINVAL; goto bad_inode; } if (err) { /* ni_insert_nonresident failed. Try to undo. */ goto undo_insert_range; } } /* * Update primary attribute segment. */ if (vbo <= ni->i_valid) ni->i_valid += bytes; attr_b->nres.data_size = cpu_to_le64(data_size + bytes); attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes); /* ni->valid may be not equal valid_size (temporary). */ if (ni->i_valid > data_size + bytes) attr_b->nres.valid_size = attr_b->nres.data_size; else attr_b->nres.valid_size = cpu_to_le64(ni->i_valid); mi_b->dirty = true; done: i_size_write(&ni->vfs_inode, ni->vfs_inode.i_size + bytes); ni->ni_flags |= NI_FLAG_UPDATE_PARENT; mark_inode_dirty(&ni->vfs_inode); out: run_truncate(run, 0); /* clear cached values. */ up_write(&ni->file.run_lock); return err; bad_inode: _ntfs_bad_inode(&ni->vfs_inode); goto out; undo_insert_range: svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; if (svcn <= vcn && vcn < evcn1) { attr = attr_b; le = le_b; mi = mi_b; } else if (!le_b) { goto bad_inode; } else { le = le_b; attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn, &mi); if (!attr) { goto bad_inode; } svcn = le64_to_cpu(attr->nres.svcn); evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } if (attr_load_runs(attr, ni, run, NULL)) goto bad_inode; if (!run_collapse_range(run, vcn, len)) goto bad_inode; if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn)) goto bad_inode; while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) && attr->type == ATTR_DATA && !attr->name_len) { le64_sub_cpu(&attr->nres.svcn, len); le64_sub_cpu(&attr->nres.evcn, len); if (le) { le->vcn = attr->nres.svcn; ni->attr_list.dirty = true; } mi->dirty = true; } goto out; } |
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * File: af_phonet.h
 *
 * Phonet sockets kernel definitions
 *
 * Copyright (C) 2008 Nokia Corporation.
 */

#ifndef AF_PHONET_H
#define AF_PHONET_H

#include <linux/phonet.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/*
 * The lower layers may not require more space, ever. Make sure it's
 * enough.
 */
#define MAX_PHONET_HEADER	(8 + MAX_HEADER)

/*
 * Every Phonet* socket has this structure first in its
 * protocol-specific structure under name c.
 */
struct pn_sock {
	struct sock	sk;
	u16		sobject;
	u16		dobject;
	u8		resource;
};

static inline struct pn_sock *pn_sk(struct sock *sk)
{
	return (struct pn_sock *)sk;
}

extern const struct proto_ops phonet_dgram_ops;

void pn_sock_init(void);
struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa);
void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb);
void phonet_get_local_port_range(int *min, int *max);
int pn_sock_hash(struct sock *sk);
void pn_sock_unhash(struct sock *sk);
int pn_sock_get_port(struct sock *sk, unsigned short sport);

struct sock *pn_find_sock_by_res(struct net *net, u8 res);
int pn_sock_bind_res(struct sock *sock, u8 res);
int pn_sock_unbind_res(struct sock *sk, u8 res);
void pn_sock_unbind_all_res(struct sock *sk);

int pn_skb_send(struct sock *sk, struct sk_buff *skb,
		const struct sockaddr_pn *target);

static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
{
	return (struct phonethdr *)skb_network_header(skb);
}

static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
{
	return (struct phonetmsg *)skb_transport_header(skb);
}

/*
 * Get the other party's sockaddr from received skb. The skb begins
 * with a Phonet header.
 */
static inline
void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
{
	struct phonethdr *ph = pn_hdr(skb);
	u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj);

	sa->spn_family = AF_PHONET;
	pn_sockaddr_set_object(sa, obj);
	pn_sockaddr_set_resource(sa, ph->pn_res);
	memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
}

static inline
void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
{
	struct phonethdr *ph = pn_hdr(skb);
	u16 obj = pn_object(ph->pn_rdev, ph->pn_robj);

	sa->spn_family = AF_PHONET;
	pn_sockaddr_set_object(sa, obj);
	pn_sockaddr_set_resource(sa, ph->pn_res);
	memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
}

/* Protocols in Phonet protocol family.
 */
struct phonet_protocol {
	const struct proto_ops	*ops;
	struct proto		*prot;
	int			sock_type;
};

int phonet_proto_register(unsigned int protocol,
		const struct phonet_protocol *pp);
void phonet_proto_unregister(unsigned int protocol,
		const struct phonet_protocol *pp);

int phonet_sysctl_init(void);
void phonet_sysctl_exit(void);
int isi_register(void);
void isi_unregister(void);

static inline bool sk_is_phonet(struct sock *sk)
{
	return sk->sk_family == PF_PHONET;
}

static inline int phonet_sk_ioctl(struct sock *sk, unsigned int cmd,
				  void __user *arg)
{
	int karg;

	switch (cmd) {
	case SIOCPNADDRESOURCE:
	case SIOCPNDELRESOURCE:
		if (get_user(karg, (int __user *)arg))
			return -EFAULT;

		return sk->sk_prot->ioctl(sk, cmd, &karg);
	}

	/* A positive return value means that the ioctl was not processed */
	return 1;
}

#endif
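/*
 * Usage sketch (not part of af_phonet.h): how a receive path could pull the
 * sender's address out of an incoming Phonet skb with the helpers declared
 * above. pn_example_rcv() is a hypothetical name; only
 * pn_skb_get_src_sockaddr() and struct sockaddr_pn come from the header,
 * and the skb is assumed to already have its network header pointing at a
 * Phonet header.
 */
static int pn_example_rcv(struct sk_buff *skb)
{
	struct sockaddr_pn src;

	pn_skb_get_src_sockaddr(skb, &src);

	/*
	 * src.spn_family is now AF_PHONET and the object/resource fields are
	 * filled from the on-wire header; a real handler would use them to
	 * look up the destination socket, e.g. via pn_find_sock_by_sa().
	 */
	return 0;
}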
149 150 2 188 1 203 53 37 174 167 47 1 164 50 170 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef ASM_KVM_CACHE_REGS_H #define ASM_KVM_CACHE_REGS_H #include <linux/kvm_host.h> #define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP) #define KVM_POSSIBLE_CR4_GUEST_BITS \ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE) #define X86_CR0_PDPTR_BITS (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG) #define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP) #define X86_CR4_PDPTR_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP) static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS)); #define BUILD_KVM_GPR_ACCESSORS(lname, uname) \ static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\ { \ return vcpu->arch.regs[VCPU_REGS_##uname]; \ } \ static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \ unsigned long val) \ { \ vcpu->arch.regs[VCPU_REGS_##uname] = val; \ } BUILD_KVM_GPR_ACCESSORS(rax, RAX) BUILD_KVM_GPR_ACCESSORS(rbx, RBX) BUILD_KVM_GPR_ACCESSORS(rcx, RCX) BUILD_KVM_GPR_ACCESSORS(rdx, RDX) BUILD_KVM_GPR_ACCESSORS(rbp, RBP) BUILD_KVM_GPR_ACCESSORS(rsi, RSI) BUILD_KVM_GPR_ACCESSORS(rdi, RDI) #ifdef CONFIG_X86_64 BUILD_KVM_GPR_ACCESSORS(r8, R8) BUILD_KVM_GPR_ACCESSORS(r9, R9) BUILD_KVM_GPR_ACCESSORS(r10, R10) BUILD_KVM_GPR_ACCESSORS(r11, R11) BUILD_KVM_GPR_ACCESSORS(r12, R12) BUILD_KVM_GPR_ACCESSORS(r13, R13) BUILD_KVM_GPR_ACCESSORS(r14, R14) BUILD_KVM_GPR_ACCESSORS(r15, R15) #endif /* * avail dirty * 0 0 register in VMCS/VMCB * 0 1 *INVALID* * 1 0 register in vcpu->arch * 1 1 register in vcpu->arch, needs to be stored back */ static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, enum kvm_reg reg) { return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); } static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg) { return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); } static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); } static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu, enum kvm_reg reg) { __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); } /* * kvm_register_test_and_mark_available() is a special snowflake that uses an * arch bitop directly to avoid the explicit instrumentation that comes with * the generic bitops. This allows code that cannot be instrumented (noinstr * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers. 
*/ static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg) { return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); } /* * The "raw" register helpers are only for cases where the full 64 bits of a * register are read/written irrespective of current vCPU mode. In other words, * odds are good you shouldn't be using the raw variants. */ static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg) { if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) return 0; if (!kvm_register_is_available(vcpu, reg)) static_call(kvm_x86_cache_reg)(vcpu, reg); return vcpu->arch.regs[reg]; } static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg, unsigned long val) { if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS)) return; vcpu->arch.regs[reg] = val; kvm_register_mark_dirty(vcpu, reg); } static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu) { return kvm_register_read_raw(vcpu, VCPU_REGS_RIP); } static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val) { kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val); } static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu) { return kvm_register_read_raw(vcpu, VCPU_REGS_RSP); } static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val) { kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val); } static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index) { might_sleep(); /* on svm */ if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR); return vcpu->arch.walk_mmu->pdptrs[index]; } static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value) { vcpu->arch.walk_mmu->pdptrs[index] = value; } static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask) { ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS; if ((tmask & vcpu->arch.cr0_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR0)) static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0); return vcpu->arch.cr0 & mask; } static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit) { BUILD_BUG_ON(!is_power_of_2(cr0_bit)); return !!kvm_read_cr0_bits(vcpu, cr0_bit); } static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu) { return kvm_read_cr0_bits(vcpu, ~0UL); } static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask) { ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS; if ((tmask & vcpu->arch.cr4_guest_owned_bits) && !kvm_register_is_available(vcpu, VCPU_EXREG_CR4)) static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4); return vcpu->arch.cr4 & mask; } static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit) { BUILD_BUG_ON(!is_power_of_2(cr4_bit)); return !!kvm_read_cr4_bits(vcpu, cr4_bit); } static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu) { if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3)) static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3); return vcpu->arch.cr3; } static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu) { return kvm_read_cr4_bits(vcpu, ~0UL); } static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) { return (kvm_rax_read(vcpu) & -1u) | ((u64)(kvm_rdx_read(vcpu) & -1u) << 32); } static inline void enter_guest_mode(struct kvm_vcpu *vcpu) { vcpu->arch.hflags |= HF_GUEST_MASK; vcpu->stat.guest_mode = 1; } static inline void leave_guest_mode(struct kvm_vcpu *vcpu) { vcpu->arch.hflags &= ~HF_GUEST_MASK; if (vcpu->arch.load_eoi_exitmap_pending) { 
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif
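/*
 * Sketch (not part of this header): how a vendor backend's cache_reg hook
 * might satisfy a miss for a register kept in hardware state, following the
 * avail/dirty protocol documented above. example_cache_reg() and
 * example_hw_read_rsp() are hypothetical names; only the kvm_register_*
 * helpers and vcpu->arch.regs[] come from this header.
 */
static unsigned long example_hw_read_rsp(struct kvm_vcpu *vcpu)
{
	/* Placeholder: a real backend would read RSP from the VMCS/VMCB. */
	return 0;
}

static void example_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	switch (reg) {
	case VCPU_REGS_RSP:
		/* Pull the value out of the hardware control block ... */
		vcpu->arch.regs[VCPU_REGS_RSP] = example_hw_read_rsp(vcpu);
		/* ... and flip the register to "available, not dirty". */
		kvm_register_mark_available(vcpu, VCPU_REGS_RSP);
		break;
	default:
		break;
	}
}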
43 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _FB_DRAW_H #define _FB_DRAW_H #include <asm/types.h> #include <linux/fb.h> #include <linux/bug.h> /* * Compose two values, using a bitmask as decision value * This is equivalent to (a & mask) | (b & ~mask) */ static inline unsigned long comp(unsigned long a, unsigned long b, unsigned long mask) { return ((a ^ b) & mask) ^ b; } /* * Create a pattern with the given pixel's color */ #if BITS_PER_LONG == 64 static inline unsigned long pixel_to_pat( u32 bpp, u32 pixel) { switch (bpp) { case 1: return 0xfffffffffffffffful*pixel; case 2: return 0x5555555555555555ul*pixel; case 4: return 0x1111111111111111ul*pixel; case 8: return 0x0101010101010101ul*pixel; case 12: return 0x1001001001001001ul*pixel; case 16: return 0x0001000100010001ul*pixel; case 24: return 0x0001000001000001ul*pixel; case 32: return 0x0000000100000001ul*pixel; default: WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); return 0; } } #else static inline unsigned long pixel_to_pat( u32 bpp, u32 pixel) { switch (bpp) { case 1: return 0xfffffffful*pixel; case 2: return 0x55555555ul*pixel; case 4: return 0x11111111ul*pixel; case 8: return 0x01010101ul*pixel; case 12: return 0x01001001ul*pixel; case 16: return 0x00010001ul*pixel; case 24: return 0x01000001ul*pixel; case 32: return 0x00000001ul*pixel; default: WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); return 0; } } #endif #ifdef CONFIG_FB_CFB_REV_PIXELS_IN_BYTE #if BITS_PER_LONG == 64 #define REV_PIXELS_MASK1 0x5555555555555555ul #define REV_PIXELS_MASK2 0x3333333333333333ul #define REV_PIXELS_MASK4 0x0f0f0f0f0f0f0f0ful #else #define REV_PIXELS_MASK1 0x55555555ul #define REV_PIXELS_MASK2 0x33333333ul #define REV_PIXELS_MASK4 0x0f0f0f0ful #endif static inline unsigned long fb_rev_pixels_in_long(unsigned long val, u32 bswapmask) { if (bswapmask & 1) val = comp(val >> 1, val << 1, REV_PIXELS_MASK1); if (bswapmask & 2) val = comp(val >> 2, val << 2, REV_PIXELS_MASK2); if (bswapmask & 3) val = comp(val >> 4, val << 4, REV_PIXELS_MASK4); return val; } static inline u32 fb_shifted_pixels_mask_u32(struct fb_info *p, u32 index, u32 bswapmask) { u32 mask; if (!bswapmask) { mask = FB_SHIFT_HIGH(p, ~(u32)0, index); } else { mask = 0xff << FB_LEFT_POS(p, 8); mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask; mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask)); #if defined(__i386__) || defined(__x86_64__) /* Shift argument is limited to 0 - 31 on x86 based CPU's */ if(index + bswapmask < 32) #endif mask |= FB_SHIFT_HIGH(p, ~(u32)0, (index + bswapmask) & ~(bswapmask)); } return mask; } static inline unsigned long fb_shifted_pixels_mask_long(struct fb_info *p, u32 index, u32 bswapmask) { unsigned long mask; if (!bswapmask) { mask = FB_SHIFT_HIGH(p, ~0UL, index); } else { mask = 0xff << FB_LEFT_POS(p, 8); mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask; mask = 
FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
#if defined(__i386__) || defined(__x86_64__)
		/* Shift argument is limited to 0 - 31 on x86 based CPU's */
		if(index + bswapmask < BITS_PER_LONG)
#endif
			mask |= FB_SHIFT_HIGH(p, ~0UL,
					(index + bswapmask) & ~(bswapmask));
	}
	return mask;
}

static inline u32 fb_compute_bswapmask(struct fb_info *info)
{
	u32 bswapmask = 0;
	unsigned bpp = info->var.bits_per_pixel;

	if ((bpp < 8) && (info->var.nonstd & FB_NONSTD_REV_PIX_IN_B)) {
		/*
		 * Reversed order of pixel layout in bytes
		 * works only for 1, 2 and 4 bpp
		 */
		bswapmask = 7 - bpp + 1;
	}
	return bswapmask;
}

#else /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */

static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
						  u32 bswapmask)
{
	return val;
}

#define fb_shifted_pixels_mask_u32(p, i, b) FB_SHIFT_HIGH((p), ~(u32)0, (i))
#define fb_shifted_pixels_mask_long(p, i, b) FB_SHIFT_HIGH((p), ~0UL, (i))
#define fb_compute_bswapmask(...) 0

#endif /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */

#define cpu_to_le_long _cpu_to_le_long(BITS_PER_LONG)
#define _cpu_to_le_long(x) __cpu_to_le_long(x)
#define __cpu_to_le_long(x) cpu_to_le##x

#define le_long_to_cpu _le_long_to_cpu(BITS_PER_LONG)
#define _le_long_to_cpu(x) __le_long_to_cpu(x)
#define __le_long_to_cpu(x) le##x##_to_cpu

static inline unsigned long rolx(unsigned long word, unsigned int shift, unsigned int x)
{
	return (word << shift) | (word >> (x - shift));
}

#endif /* FB_DRAW_H */
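/*
 * Standalone check (not part of fb_draw.h): demonstrates that the XOR trick
 * used by comp() above, ((a ^ b) & mask) ^ b, really does select bits from
 * 'a' where the mask is 1 and from 'b' where it is 0, i.e. it equals
 * (a & mask) | (b & ~mask). Plain userspace C, no kernel headers assumed;
 * the helper names are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

static unsigned long comp_xor(unsigned long a, unsigned long b,
			      unsigned long mask)
{
	return ((a ^ b) & mask) ^ b;
}

static unsigned long comp_naive(unsigned long a, unsigned long b,
				unsigned long mask)
{
	return (a & mask) | (b & ~mask);
}

int main(void)
{
	unsigned long a = 0x12345678UL, b = 0xabcdef01UL, mask = 0x00ff00ffUL;

	assert(comp_xor(a, b, mask) == comp_naive(a, b, mask));
	printf("comp(0x%lx, 0x%lx, 0x%lx) = 0x%lx\n",
	       a, b, mask, comp_xor(a, b, mask));
	return 0;
}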
2 8 6 3 1 11 11 14 1 5 5 3 7 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 | // SPDX-License-Identifier: GPL-2.0-only /* Xtables module to match packets using a BPF filter. * Copyright 2013 Google Inc. * Written by Willem de Bruijn <willemb@google.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/syscalls.h> #include <linux/skbuff.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/netfilter/xt_bpf.h> #include <linux/netfilter/x_tables.h> MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>"); MODULE_DESCRIPTION("Xtables: BPF filter match"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_bpf"); MODULE_ALIAS("ip6t_bpf"); static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len, struct bpf_prog **ret) { struct sock_fprog_kern program; if (len > XT_BPF_MAX_NUM_INSTR) return -EINVAL; program.len = len; program.filter = insns; if (bpf_prog_create(ret, &program)) { pr_info_ratelimited("check failed: parse error\n"); return -EINVAL; } return 0; } static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret) { struct bpf_prog *prog; prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog)) return PTR_ERR(prog); *ret = prog; return 0; } static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) { if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) return -EINVAL; *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER); return PTR_ERR_OR_ZERO(*ret); } static int bpf_mt_check(const struct xt_mtchk_param *par) { struct xt_bpf_info *info = par->matchinfo; return __bpf_mt_check_bytecode(info->bpf_program, info->bpf_program_num_elem, &info->filter); } static int bpf_mt_check_v1(const struct xt_mtchk_param *par) { struct xt_bpf_info_v1 *info = par->matchinfo; if (info->mode == XT_BPF_MODE_BYTECODE) return __bpf_mt_check_bytecode(info->bpf_program, info->bpf_program_num_elem, &info->filter); else if (info->mode == XT_BPF_MODE_FD_ELF) return __bpf_mt_check_fd(info->fd, &info->filter); else if (info->mode == XT_BPF_MODE_PATH_PINNED) return __bpf_mt_check_path(info->path, &info->filter); else return -EINVAL; } static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_bpf_info *info = par->matchinfo; return bpf_prog_run(info->filter, skb); } static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_bpf_info_v1 *info = par->matchinfo; return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb); } static void bpf_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_bpf_info *info = par->matchinfo; bpf_prog_destroy(info->filter); } static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par) { const struct xt_bpf_info_v1 *info = par->matchinfo; bpf_prog_destroy(info->filter); } static struct xt_match bpf_mt_reg[] __read_mostly = { { .name = "bpf", .revision = 0, .family = NFPROTO_UNSPEC, .checkentry = bpf_mt_check, .match = bpf_mt, .destroy = bpf_mt_destroy, .matchsize = sizeof(struct xt_bpf_info), .usersize = offsetof(struct xt_bpf_info, filter), .me = 
THIS_MODULE,
	},
	{
		.name		= "bpf",
		.revision	= 1,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check_v1,
		.match		= bpf_mt_v1,
		.destroy	= bpf_mt_destroy_v1,
		.matchsize	= sizeof(struct xt_bpf_info_v1),
		.usersize	= offsetof(struct xt_bpf_info_v1, filter),
		.me		= THIS_MODULE,
	},
};

static int __init bpf_mt_init(void)
{
	return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

static void __exit bpf_mt_exit(void)
{
	xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

module_init(bpf_mt_init);
module_exit(bpf_mt_exit);
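/*
 * Sketch (not part of xt_bpf.c): the kind of classic BPF program that
 * __bpf_mt_check_bytecode() above expects in xt_bpf_info->bpf_program.
 * This one-instruction filter unconditionally returns a non-zero constant,
 * so the match would accept every packet. The array name is illustrative.
 */
#include <linux/filter.h>

static struct sock_filter example_accept_all[] = {
	/* BPF_RET | BPF_K: return the constant 0xffffffff ("match"). */
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
};
/* bpf_program_num_elem would then be ARRAY_SIZE(example_accept_all). */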
53 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM vsock #if !defined(_TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H) || \ defined(TRACE_HEADER_MULTI_READ) #define _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H #include <linux/tracepoint.h> TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_STREAM); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_TYPE_SEQPACKET); #define show_type(val) \ __print_symbolic(val, \ { VIRTIO_VSOCK_TYPE_STREAM, "STREAM" }, \ { VIRTIO_VSOCK_TYPE_SEQPACKET, "SEQPACKET" }) TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_INVALID); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_REQUEST); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RESPONSE); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RST); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_SHUTDOWN); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_RW); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_UPDATE); TRACE_DEFINE_ENUM(VIRTIO_VSOCK_OP_CREDIT_REQUEST); #define show_op(val) \ __print_symbolic(val, \ { VIRTIO_VSOCK_OP_INVALID, "INVALID" }, \ { VIRTIO_VSOCK_OP_REQUEST, "REQUEST" }, \ { VIRTIO_VSOCK_OP_RESPONSE, "RESPONSE" }, \ { VIRTIO_VSOCK_OP_RST, "RST" }, \ { VIRTIO_VSOCK_OP_SHUTDOWN, "SHUTDOWN" }, \ { VIRTIO_VSOCK_OP_RW, "RW" }, \ { VIRTIO_VSOCK_OP_CREDIT_UPDATE, "CREDIT_UPDATE" }, \ { VIRTIO_VSOCK_OP_CREDIT_REQUEST, "CREDIT_REQUEST" }) TRACE_EVENT(virtio_transport_alloc_pkt, TP_PROTO( __u32 src_cid, __u32 src_port, __u32 dst_cid, __u32 dst_port, __u32 len, __u16 type, __u16 op, __u32 flags, bool zcopy ), TP_ARGS( src_cid, src_port, dst_cid, dst_port, len, type, op, flags, zcopy ), TP_STRUCT__entry( __field(__u32, src_cid) __field(__u32, src_port) __field(__u32, dst_cid) __field(__u32, dst_port) __field(__u32, len) __field(__u16, type) __field(__u16, op) __field(__u32, flags) __field(bool, zcopy) ), TP_fast_assign( __entry->src_cid = src_cid; __entry->src_port = src_port; __entry->dst_cid = dst_cid; __entry->dst_port = dst_port; __entry->len = len; __entry->type = type; __entry->op = op; __entry->flags = flags; __entry->zcopy = zcopy; ), TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x zcopy=%s", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), __entry->flags, __entry->zcopy ? 
"true" : "false") ); TRACE_EVENT(virtio_transport_recv_pkt, TP_PROTO( __u32 src_cid, __u32 src_port, __u32 dst_cid, __u32 dst_port, __u32 len, __u16 type, __u16 op, __u32 flags, __u32 buf_alloc, __u32 fwd_cnt ), TP_ARGS( src_cid, src_port, dst_cid, dst_port, len, type, op, flags, buf_alloc, fwd_cnt ), TP_STRUCT__entry( __field(__u32, src_cid) __field(__u32, src_port) __field(__u32, dst_cid) __field(__u32, dst_port) __field(__u32, len) __field(__u16, type) __field(__u16, op) __field(__u32, flags) __field(__u32, buf_alloc) __field(__u32, fwd_cnt) ), TP_fast_assign( __entry->src_cid = src_cid; __entry->src_port = src_port; __entry->dst_cid = dst_cid; __entry->dst_port = dst_port; __entry->len = len; __entry->type = type; __entry->op = op; __entry->flags = flags; __entry->buf_alloc = buf_alloc; __entry->fwd_cnt = fwd_cnt; ), TP_printk("%u:%u -> %u:%u len=%u type=%s op=%s flags=%#x " "buf_alloc=%u fwd_cnt=%u", __entry->src_cid, __entry->src_port, __entry->dst_cid, __entry->dst_port, __entry->len, show_type(__entry->type), show_op(__entry->op), __entry->flags, __entry->buf_alloc, __entry->fwd_cnt) ); #endif /* _TRACE_VSOCK_VIRTIO_TRANSPORT_COMMON_H */ #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE vsock_virtio_transport_common /* This part must be outside protection */ #include <trace/define_trace.h> |
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #include <linux/fs.h> #include <linux/namei.h> #include "internal.h" /* * Mark the backing file as being a cache file if it's not already in use. The * mark tells the culling request command that it's not allowed to cull the * file or directory. The caller must hold the inode lock. */ static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object, struct inode *inode) { bool can_use = false; if (!(inode->i_flags & S_KERNEL_FILE)) { inode->i_flags |= S_KERNEL_FILE; trace_cachefiles_mark_active(object, inode); can_use = true; } else { trace_cachefiles_mark_failed(object, inode); } return can_use; } static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object, struct inode *inode) { bool can_use; inode_lock(inode); can_use = __cachefiles_mark_inode_in_use(object, inode); inode_unlock(inode); return can_use; } /* * Unmark a backing inode. The caller must hold the inode lock. */ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object, struct inode *inode) { inode->i_flags &= ~S_KERNEL_FILE; trace_cachefiles_mark_inactive(object, inode); } static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object, struct inode *inode) { inode_lock(inode); __cachefiles_unmark_inode_in_use(object, inode); inode_unlock(inode); } /* * Unmark a backing inode and tell cachefilesd that there's something that can * be culled. */ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object, struct file *file) { struct cachefiles_cache *cache = object->volume->cache; struct inode *inode = file_inode(file); cachefiles_do_unmark_inode_in_use(object, inode); if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) { atomic_long_add(inode->i_blocks, &cache->b_released); if (atomic_inc_return(&cache->f_released)) cachefiles_state_changed(cache); } } /* * get a subdirectory */ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, struct dentry *dir, const char *dirname, bool *_is_new) { struct dentry *subdir; struct path path; int ret; _enter(",,%s", dirname); /* search the current directory for the element name */ inode_lock_nested(d_inode(dir), I_MUTEX_PARENT); retry: ret = cachefiles_inject_read_error(); if (ret == 0) subdir = lookup_one_len(dirname, dir, strlen(dirname)); else subdir = ERR_PTR(ret); trace_cachefiles_lookup(NULL, dir, subdir); if (IS_ERR(subdir)) { trace_cachefiles_vfs_error(NULL, d_backing_inode(dir), PTR_ERR(subdir), cachefiles_trace_lookup_error); if (PTR_ERR(subdir) == -ENOMEM) goto nomem_d_alloc; goto lookup_error; } _debug("subdir -> %pd %s", subdir, d_backing_inode(subdir) ? 
"positive" : "negative"); /* we need to create the subdir if it doesn't exist yet */ if (d_is_negative(subdir)) { ret = cachefiles_has_space(cache, 1, 0, cachefiles_has_space_for_create); if (ret < 0) goto mkdir_error; _debug("attempt mkdir"); path.mnt = cache->mnt; path.dentry = dir; ret = security_path_mkdir(&path, subdir, 0700); if (ret < 0) goto mkdir_error; ret = cachefiles_inject_write_error(); if (ret == 0) ret = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700); if (ret < 0) { trace_cachefiles_vfs_error(NULL, d_inode(dir), ret, cachefiles_trace_mkdir_error); goto mkdir_error; } trace_cachefiles_mkdir(dir, subdir); if (unlikely(d_unhashed(subdir))) { cachefiles_put_directory(subdir); goto retry; } ASSERT(d_backing_inode(subdir)); _debug("mkdir -> %pd{ino=%lu}", subdir, d_backing_inode(subdir)->i_ino); if (_is_new) *_is_new = true; } /* Tell rmdir() it's not allowed to delete the subdir */ inode_lock(d_inode(subdir)); inode_unlock(d_inode(dir)); if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) { pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n", subdir, d_inode(subdir)->i_ino); goto mark_error; } inode_unlock(d_inode(subdir)); /* we need to make sure the subdir is a directory */ ASSERT(d_backing_inode(subdir)); if (!d_can_lookup(subdir)) { pr_err("%s is not a directory\n", dirname); ret = -EIO; goto check_error; } ret = -EPERM; if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) || !d_backing_inode(subdir)->i_op->lookup || !d_backing_inode(subdir)->i_op->mkdir || !d_backing_inode(subdir)->i_op->rename || !d_backing_inode(subdir)->i_op->rmdir || !d_backing_inode(subdir)->i_op->unlink) goto check_error; _leave(" = [%lu]", d_backing_inode(subdir)->i_ino); return subdir; check_error: cachefiles_put_directory(subdir); _leave(" = %d [check]", ret); return ERR_PTR(ret); mark_error: inode_unlock(d_inode(subdir)); dput(subdir); return ERR_PTR(-EBUSY); mkdir_error: inode_unlock(d_inode(dir)); dput(subdir); pr_err("mkdir %s failed with error %d\n", dirname, ret); return ERR_PTR(ret); lookup_error: inode_unlock(d_inode(dir)); ret = PTR_ERR(subdir); pr_err("Lookup %s failed with error %d\n", dirname, ret); return ERR_PTR(ret); nomem_d_alloc: inode_unlock(d_inode(dir)); _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } /* * Put a subdirectory. */ void cachefiles_put_directory(struct dentry *dir) { if (dir) { cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir)); dput(dir); } } /* * Remove a regular file from the cache. 
/*
 * Remove a regular file from the cache.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt	= cache->mnt,
		.dentry	= dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}
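/*
 * Note on the graveyard scheme used by cachefiles_bury_object() below:
 * directories can't simply be unlinked, so they are renamed into the
 * graveyard under a name built as "%08x%08x" from the current time in
 * seconds and an incrementing grave counter (hence the 8 + 8 + 1 byte
 * buffer), and cachefilesd deletes them from there.  lock_rename() hands
 * back the common-ancestor "trap" dentry, which must match neither the
 * victim nor the grave or the rename would create a directory loop.
 */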
/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	if (rep->d_parent != dir) {
		inode_unlock(d_inode(dir));
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		dget(rep); /* Stop the dentry being negated if it's only pinned
			    * by a file struct. */
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		dput(rep);

		inode_unlock(d_inode(dir));
		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);
	if (IS_ERR(trap))
		return PTR_ERR(trap);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);
		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
					   PTR_ERR(grave),
					   cachefiles_trace_lookup_error);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
		return -EIO;
	}

	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		struct renamedata rd = {
			.old_mnt_idmap	= &nop_mnt_idmap,
			.old_dir	= d_inode(dir),
			.old_dentry	= rep,
			.new_mnt_idmap	= &nop_mnt_idmap,
			.new_dir	= d_inode(cache->graveyard),
			.new_dentry	= grave,
		};
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}

/*
 * Delete a cache file.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	/* Stop the dentry being negated if it's only pinned by a file struct. */
	dget(dentry);

	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	inode_unlock(d_backing_inode(fan));
	dput(dentry);
	return ret;
}
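/*
 * Note on the tmpfile scheme below: cachefiles_create_tmpfile() opens a
 * detached, as-yet-nameless backing file via kernel_tmpfile_open() in the
 * object's fanout directory; cachefiles_create_file() then records it in
 * object->file and sets CACHEFILES_OBJECT_USING_TMPFILE.  The file only
 * gains a name in the cache when cachefiles_commit_tmpfile() links it into
 * the fanout directory and clears that flag again.
 */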
/*
 * Create a temporary file and leave it unattached and un-xattr'd until the
 * time comes to discard the object from memory.
 */
struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct cachefiles_cache *cache = volume->cache;
	const struct cred *saved_cred;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	struct file *file;
	const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
	uint64_t ni_size;
	long ret;

	cachefiles_begin_secure(cache, &saved_cred);

	ret = cachefiles_inject_write_error();
	if (ret == 0) {
		file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
					   S_IFREG | 0600,
					   O_RDWR | O_LARGEFILE | O_DIRECT,
					   cache->cache_cred);
		ret = PTR_ERR_OR_ZERO(file);
	}
	if (ret) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_tmpfile_error);
		if (ret == -EIO)
			cachefiles_io_error_obj(object, "Failed to create tmpfile");
		goto err;
	}

	trace_cachefiles_tmpfile(object, file_inode(file));

	/* This is a newly created file with no other possible user */
	if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
		WARN_ON(1);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto err_unuse;

	ni_size = object->cookie->object_size;
	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);

	if (ni_size > 0) {
		trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
				       cachefiles_trunc_expand_tmpfile);
		ret = cachefiles_inject_write_error();
		if (ret == 0)
			ret = vfs_truncate(&file->f_path, ni_size);
		if (ret < 0) {
			trace_cachefiles_vfs_error(
				object, file_inode(file), ret,
				cachefiles_trace_trunc_error);
			goto err_unuse;
		}
	}

	ret = -EINVAL;
	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		fput(file);
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto err_unuse;
	}
out:
	cachefiles_end_secure(cache, saved_cred);
	return file;

err_unuse:
	cachefiles_do_unmark_inode_in_use(object, file_inode(file));
	fput(file);
err:
	file = ERR_PTR(ret);
	goto out;
}

/*
 * Create a new file.
 */
static bool cachefiles_create_file(struct cachefiles_object *object)
{
	struct file *file;
	int ret;

	ret = cachefiles_has_space(object->volume->cache, 1, 0,
				   cachefiles_has_space_for_create);
	if (ret < 0)
		return false;

	file = cachefiles_create_tmpfile(object);
	if (IS_ERR(file))
		return false;

	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
	object->file = file;
	return true;
}

/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
				d_backing_inode(dentry), cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	dput(dentry);
	return true;

check_failed:
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	dput(dentry);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	dput(dentry);
	return false;
}
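/*
 * Note on cachefiles_look_up_object() below: the backing file is looked up
 * by name in the fanout directory.  ENOENT leads to a fresh tmpfile being
 * created; anything that isn't a regular file is buried and recreated; and
 * an existing file is opened and its auxdata checked, with a stale file
 * being replaced via cachefiles_create_file().
 */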
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_positive_unlocked(object->d_name, fan,
						  object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
					     FSCACHE_OBJECT_IS_WEIRD);
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	if (!cachefiles_open_file(object, dentry))
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}

/*
 * Attempt to link a temporary file into its rightful place in the cache.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out_unlock;
	}

	if (!d_is_negative(dentry)) {
		if (d_backing_inode(dentry) == file_inode(object->file)) {
			success = true;
			goto out_dput;
		}

		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_dput;

		dput(dentry);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = lookup_one_len(object->d_name, fan,
						object->d_name_len);
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan),
						   PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			goto out_unlock;
		}
	}

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(d_inode(fan));
	_leave(" = %u", success);
	return success;
}

/*
 * Look up an inode to be checked or culled. Return -EBUSY if the inode is
 * marked in use.
 */
static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
						 struct dentry *dir,
						 char *filename)
{
	struct dentry *victim;
	int ret = -ENOENT;

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	victim = lookup_one_len(filename, dir, strlen(filename));
	if (IS_ERR(victim))
		goto lookup_error;
	if (d_is_negative(victim))
		goto lookup_put;
	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
		goto lookup_busy;
	return victim;

lookup_busy:
	ret = -EBUSY;
lookup_put:
	inode_unlock(d_inode(dir));
	dput(victim);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT)